[
  {
    "path": ".gitignore",
    "content": "*.py[cod]\n\n# C extensions\n*.so\n\n# Packages\n*.egg*\n*.egg-info\ndist\nbuild\neggs\nparts\nvar\nsdist\ndevelop-eggs\n.installed.cfg\nlib\nlib64\n\n# Installer logs\npip-log.txt\n\n# Unit test / coverage reports\ncover/\n.coverage*\n!.coveragerc\n.tox\nnosetests.xml\n.testrepository\n.venv\n\n# Translations\n*.mo\n\n# Mr Developer\n.mr.developer.cfg\n.project\n.pydevproject\n\n# Complexity\noutput/*.html\noutput/*/index.html\n\n# Sphinx\ndoc/build\ndoc/source/chart/*\n!doc/source/chart/index.rst\n!doc/source/chart/openstack_charts.rst\n!doc/source/chart/infra_charts.rst\n\n# installed tools\ntools/helm-docs\n\n# pbr generates these\nAUTHORS\nChangeLog\n\n# Editors\n*~\n.*.swp\n.*sw?\n\n# Files created by releasenotes build\nreleasenotes/build\n\n# Dev tools\n.idea/\n.vscode/\n.devcontainer/\n**/.vagrant\n**/*.log\n\n# Helm internals\n*.lock\n*/*.lock\n*.tgz\n**/*.tgz\n**/_partials.tpl\n**/_globals.tpl\n\n# Gate and Check Logs\nlogs/\ntools/gate/local-overrides/\nplaybooks/*.retry\ntmp/\n\n# Helm-toolkit dev\nhelm-toolkit/templates/test.yaml\nhelm-toolkit/values.yaml\n"
  },
  {
    "path": ".gitreview",
    "content": "[gerrit]\nhost=review.opendev.org\nport=29418\nproject=openstack/openstack-helm.git\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "---\nrepos:\n  - repo: https://github.com/pre-commit/pre-commit-hooks\n    rev: v4.5.0\n    hooks:\n      - id: trailing-whitespace\n      - id: fix-byte-order-marker\n      - id: mixed-line-ending\n        args: ['--fix', 'lf']\n      - id: check-merge-conflict\n  - repo: https://github.com/sphinx-contrib/sphinx-lint\n    rev: v1.0.0\n    hooks:\n      - id: sphinx-lint\n        args: [--enable=default-role]\n        files: ^doc/|releasenotes\n"
  },
  {
    "path": "CONTRIBUTING.rst",
    "content": "The source repository for this project can be found at:\n\n   https://opendev.org/openstack/openstack-helm.git\n\nPull requests submitted through GitHub are not monitored.\n\nTo start contributing to OpenStack, follow the steps in the contribution guide\nto set up and use Gerrit:\n\n   https://docs.openstack.org/contributors/code-and-documentation/quick-start.html\n\nBugs should be filed on StoryBoard:\n\n   https://storyboard.openstack.org/#!/project/openstack/openstack-helm\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"{}\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright {yyyy} {name of copyright owner}\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "Makefile",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# It's necessary to set this because some environments don't link sh -> bash.\nSHELL := /bin/bash\nHELM := helm\nPYTHON := python3\nTASK  := build\nHELM_DOCS := tools/helm-docs\nUNAME_OS := $(shell uname -s)\nUNAME_ARCH := $(shell uname -m)\n# We generate CHANGELOG.md files by default which\n# requires reno>=4.1.0 installed.\n# To skip generating it use the following:\n# make all SKIP_CHANGELOG=1\nSKIP_CHANGELOG ?= 0\n\nPKG_ARGS =\nifdef VERSION\n\tPKG_ARGS += --version $(VERSION)\nendif\n\nifdef PACKAGE_DIR\n\tPKG_ARGS += --destination $(PACKAGE_DIR)\nendif\n\nBASE_VERSION ?= 2025.2.0\n\nCHART_DIRS := $(subst /,,$(dir $(wildcard */Chart.yaml)))\nCHARTS := $(sort helm-toolkit $(CHART_DIRS))\n\n.PHONY: $(CHARTS)\n\nall: $(CHARTS)\n\ncharts:\n\t@echo $(CHART_DIRS)\n\n$(CHARTS):\n\t@if [ -d $@ ]; then \\\n\t\techo; \\\n\t\techo \"===== Processing [$@] chart =====\"; \\\n\t\tmake $(TASK)-$@; \\\n\tfi\n\nHELM_DOCS_VERSION ?= 1.14.2\n.PHONY: helm-docs ## Download helm-docs locally if necessary\nhelm-docs: $(HELM_DOCS)\n$(HELM_DOCS):\n\t{ \\\n\t\tcurl -fsSL -o tools/helm-docs.tar.gz https://github.com/norwoodj/helm-docs/releases/download/v$(HELM_DOCS_VERSION)/helm-docs_$(HELM_DOCS_VERSION)_$(UNAME_OS)_$(UNAME_ARCH).tar.gz && \\\n\t\ttar -zxf tools/helm-docs.tar.gz -C tools helm-docs && \\\n\t\trm -f tools/helm-docs.tar.gz && \\\n\t\tchmod +x tools/helm-docs; \\\n\t}\n\ninit-%:\n\tif [ -f $*/Makefile ]; then make -C $*; fi\n\tif grep -qE \"^dependencies:\" $*/Chart.yaml; then $(HELM) dep up $*; fi\n\nlint-%: init-%\n\tif [ -d $* ]; then $(HELM) lint $*; fi\n\n# reno required for changelog generation\n%/CHANGELOG.md:\n\tif [ -d $* ]; then $(PYTHON) tools/changelog.py --charts $*; fi\n\nbuild-%: lint-% $(if $(filter-out 1,$(SKIP_CHANGELOG)),%/CHANGELOG.md)\n\tif [ -d $* ]; then \\\n\t\t$(HELM) package $* --version $$(tools/chart_version.sh $* $(BASE_VERSION)) $(PKG_ARGS); \\\n\tfi\n\n# This is used exclusively with helm3 building in the gate to publish\npackage-%: init-%\n\tif [ -d $* ]; then $(HELM) package $* $(PKG_ARGS); fi\n\nclean:\n\t@echo \"Clean all build artifacts\"\n\trm -f */templates/_partials.tpl */templates/_globals.tpl\n\trm -f *tgz */charts/*tgz */requirements.lock\n\trm -rf */charts */tmpcharts\n\npull-all-images:\n\t@./tools/deployment/common/pull-images.sh\n\npull-images:\n\t@./tools/deployment/common/pull-images.sh $(filter-out $@,$(MAKECMDGOALS))\n\ndev-deploy:\n\t@./tools/gate/devel/start.sh $(filter-out $@,$(MAKECMDGOALS))\n\n%:\n\t@:\n"
  },
  {
    "path": "README.rst",
    "content": "==============\nOpenStack-Helm\n==============\n\nMission\n-------\n\nThe goal of OpenStack-Helm is to provide a collection of Helm charts that\nsimply, resiliently, and flexibly deploy OpenStack and related services\non Kubernetes.\n\nVersions supported\n------------------\n\nThe table below shows the combinations of the Openstack/Platform/Kubernetes versions\nthat are tested and proved to work.\n\n.. list-table::\n   :widths: 30 30 30 30\n   :header-rows: 1\n\n   * - Openstack version\n     - Host OS\n     - Image OS\n     - Kubernetes version\n   * - 2024.1 (Caracal)\n     - Ubuntu Jammy\n     - Ubuntu Jammy\n     - >=1.29,<=1.31\n   * - 2024.2 (Dalmatian)\n     - Ubuntu Jammy\n     - Ubuntu Jammy\n     - >=1.29,<=1.31\n   * - 2025.1 (Epoxy)\n     - Ubuntu Jammy\n     - Ubuntu Jammy\n     - >=1.29,<=1.31\n   * - 2025.1 (Epoxy)\n     - Ubuntu Noble\n     - Ubuntu Noble\n     - >=1.29,<=1.31\n\nCommunication\n-------------\n\n* Join us on `IRC <irc://chat.oftc.net/openstack-helm>`_:\n  ``#openstack-helm`` on oftc\n* Join us on `Slack <https://kubernetes.slack.com/messages/C3WERB7DE/>`_\n  (this is the preferred way of communication): ``#openstack-helm``\n* Join us on `Openstack-discuss <https://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss>`_\n  mailing list (use subject prefix ``[openstack-helm]``)\n\nThe list of Openstack-Helm core team members is available here\n`openstack-helm-core <https://review.opendev.org/#/admin/groups/1749,members>`_.\n\nStoryboard\n----------\n\nYou found an issue and want to make sure we are aware of it? You can do so on our\n`Storyboard <https://storyboard.openstack.org/#!/project_group/64>`_.\n\nBugs should be filed as stories in Storyboard, not GitHub.\n\nPlease be as specific as possible when describing an issue. Usually having\nmore context in the bug description means less effort for a developer to\nreproduce the bug and understand how to fix it.\n\nAlso before filing a bug to the Openstack-Helm `Storyboard <https://storyboard.openstack.org/#!/project_group/64>`_\nplease try to identify if the issue is indeed related to the deployment\nprocess and not to the deployable software.\n\nOther links\n-----------\n\nOur documentation is available `here <https://docs.openstack.org/openstack-helm/latest/>`_.\n\nThis project is under active development. We encourage anyone interested in\nOpenStack-Helm to review the `code changes <https://review.opendev.org/q/(project:openstack/openstack-helm+OR+project:openstack/openstack-helm-images+OR+project:openstack/loci)+AND+-is:abandoned>`_\n\nOur repositories:\n\n* OpenStack charts `openstack-helm <https://opendev.org/openstack/openstack-helm.git>`_\n* OpenStack-Helm plugin `openstack-helm-plugin <https://opendev.org/openstack/openstack-helm-plugin.git>`_\n* Build non-OpenStack images `openstack-helm-images <https://opendev.org/openstack/openstack-helm-images.git>`_\n* Build Openstack images `loci <https://opendev.org/openstack/loci.git>`_\n\nWe welcome contributions in any form: code review, code changes, usage feedback, updating documentation.\n\nRelease notes\n-------------\n\nWe use `reno <https://opendev.org/openstack/reno.git>`_ for managing release notes. If you update\na chart, please add a release note using the following command:\n\n.. code-block:: bash\n\n    reno new <chart_name>\n\nThis will create a new release note file ``releasenotes/notes/<chart_name>-<sha>.yaml``. Fill in the\nnecessary information and commit the release note file.\n\nIf you update multiple charts in a single commit use the following command:\n\n.. code-block:: bash\n\n    reno new common\n\nThis will create a new release note file ``releasenotes/notes/common-<sha>.yaml``. In this case you\ncan add multiple chart-specific sections in this release note file.\n\nWhen building tarballs, we will use the ``reno`` features to combine release notes from all files and\ngenerate ``<chart_name>/CHANGELOG.md`` files.\n"
  },
  {
    "path": "aodh/Chart.yaml",
    "content": "# Copyright 2019 Wind River Systems, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: Openstack-Helm Aodh\nname: aodh\nversion: 2025.2.0\nhome: https://docs.openstack.org/aodh/latest/\nsources:\n  - https://opendev.org/openstack/aodh\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "aodh/templates/bin/_aodh-alarms-cleaner.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec aodh-expirer \\\n     --config-file=/etc/aodh/aodh.conf \\\n     --config-dir=/etc/aodh/aodh.conf.d\n"
  },
  {
    "path": "aodh/templates/bin/_aodh-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n\n  cp -a $(type -p aodh-api) /var/www/cgi-bin/aodh/\n\n  if [ -f /etc/apache2/envvars ]; then\n    # Loading Apache2 ENV variables\n    source /etc/apache2/envvars\n    # The directory below has to be created due to the fact that\n    # libapache2-mod-wsgi-py3 doesn't create it in contrary by libapache2-mod-wsgi\n    if [ ! -d ${APACHE_RUN_DIR} ]; then\n       mkdir -p ${APACHE_RUN_DIR}\n    fi\n  fi\n\n  # Get rid of stale pid file if present.\n  rm -f /var/run/apache2/*.pid\n\n  # Start Apache2\n  exec apache2 -DFOREGROUND\n}\n\nfunction stop () {\n  apachectl -k graceful-stop\n}\n\n$COMMAND\n"
  },
  {
    "path": "aodh/templates/bin/_aodh-evaluator.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec aodh-evaluator \\\n     --config-file=/etc/aodh/aodh.conf \\\n     --config-dir=/etc/aodh/aodh.conf.d\n"
  },
  {
    "path": "aodh/templates/bin/_aodh-listener.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec aodh-listener \\\n     --config-file=/etc/aodh/aodh.conf \\\n     --config-dir=/etc/aodh/aodh.conf.d\n"
  },
  {
    "path": "aodh/templates/bin/_aodh-notifier.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec aodh-notifier \\\n     --config-file=/etc/aodh/aodh.conf \\\n     --config-dir=/etc/aodh/aodh.conf.d\n"
  },
  {
    "path": "aodh/templates/bin/_aodh-test.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexport HOME=/tmp\n\necho \"Test: create an alarm\"\naodh alarm create \\\n    --name test_cpu_aggregation \\\n    --type gnocchi_aggregation_by_resources_threshold \\\n    --metric cpu --threshold 214106115 \\\n    --comparison-operator lt \\\n    --aggregation-method mean \\\n    --granularity 300  \\\n    --evaluation-periods 1 \\\n    --alarm-action 'http://localhost:8776/alarm' \\\n    --resource-type instance \\\n    --query '{\"=\": {\"flavor_name\": \"small\"}}'\nsleep 5\n\necho \"Test: list alarms\"\naodh alarm list\nsleep 5\n\necho \"Test: show an alarm\"\nALARM_UUID=$(aodh alarm list -c alarm_id -f value | head -1)\naodh alarm show ${ALARM_UUID}\nsleep 5\n\necho \"Test: update an alarm\"\naodh alarm update ${ALARM_UUID} --comparison-operator gt\nsleep 5\n\necho \"Test: delete an alarm\"\naodh alarm delete ${ALARM_UUID}\n\nexit 0\n\n"
  },
  {
    "path": "aodh/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "aodh/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec aodh-dbsync\n"
  },
  {
    "path": "aodh/templates/configmap-bin.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: aodh-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  aodh-test.sh: |\n{{ tuple \"bin/_aodh-test.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  aodh-api.sh: |\n{{ tuple \"bin/_aodh-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  aodh-evaluator.sh: |\n{{ tuple \"bin/_aodh-evaluator.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  aodh-listener.sh: |\n{{ tuple \"bin/_aodh-listener.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  aodh-notifier.sh: |\n{{ tuple \"bin/_aodh-notifier.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  aodh-alarms-cleaner.sh: |\n{{ tuple \"bin/_aodh-alarms-cleaner.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/configmap-etc.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.aodh.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.aodh.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.aodh.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.aodh.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.aodh.region_name -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.aodh.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.aodh.project_name -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.aodh.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.aodh.project_domain_name -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.aodh.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.aodh.user_domain_name -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.aodh.keystone_authtoken \"username\" .Values.endpoints.identity.auth.aodh.username -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.aodh.keystone_authtoken \"password\" .Values.endpoints.identity.auth.aodh.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.aodh.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.aodh.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.aodh.database.connection)) (empty .Values.conf.aodh.database.connection) -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"aodh\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | set .Values.conf.aodh.database \"connection\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"aodh\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.aodh.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.service_credentials.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.aodh.service_credentials \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.service_credentials.region_name -}}\n{{- $_ := set .Values.conf.aodh.service_credentials \"region_name\" .Values.endpoints.identity.auth.aodh.region_name -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.service_credentials.project_name -}}\n{{- $_ := set .Values.conf.aodh.service_credentials \"project_name\" .Values.endpoints.identity.auth.aodh.project_name -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.service_credentials.project_domain_name -}}\n{{- $_ := set .Values.conf.aodh.service_credentials \"project_domain_name\" .Values.endpoints.identity.auth.aodh.project_domain_name -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.service_credentials.user_domain_name -}}\n{{- $_ := set .Values.conf.aodh.service_credentials \"user_domain_name\" .Values.endpoints.identity.auth.aodh.user_domain_name -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.service_credentials.username -}}\n{{- $_ := set .Values.conf.aodh.service_credentials \"username\" .Values.endpoints.identity.auth.aodh.username -}}\n{{- end -}}\n\n{{- if empty .Values.conf.aodh.service_credentials.password -}}\n{{- $_ := set .Values.conf.aodh.service_credentials \"password\" .Values.endpoints.identity.auth.aodh.password -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .Release.Name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" 
$fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: aodh-etc\ntype: Opaque\ndata:\n  aodh.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.aodh | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.paste | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n{{ include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.wsgi_aodh \"key\" \"wsgi-aodh.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/cron-job-alarms-cleaner.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_job_alarms_cleaner }}\n{{- $envAll := . }}\n\n{{- $mounts_aodh_alarms_cleaner := .Values.pod.mounts.aodh_alarms_cleaner.aodh_alarms_cleaner }}\n{{- $mounts_aodh_alarms_cleaner_init := .Values.pod.mounts.aodh_alarms_cleaner.init_container }}\n\n{{- $serviceAccountName := \"aodh-alarms-cleaner\" }}\n{{ tuple $envAll \"alarms_cleaner\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.aodh_alarms_cleaner }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: aodh-alarms-cleaner\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  schedule: {{ .Values.jobs.alarms_cleaner.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.alarms_cleaner.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.alarms_cleaner.history.failed }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"aodh\" \"alarms-cleaner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"aodh\" \"alarms-cleaner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n          serviceAccountName: {{ $serviceAccountName }}\n     
     restartPolicy: OnFailure\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.aodh.enabled }}\n{{ tuple $envAll \"aodh\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n          initContainers:\n{{ tuple $envAll \"alarms_cleaner\" $mounts_aodh_alarms_cleaner_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          containers:\n            - name: aodh-alarms-cleaner\n{{ tuple $envAll \"aodh_alarms_cleaner\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.alarms_cleaner | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n              command:\n                - /tmp/aodh-alarms-cleaner.sh\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - name: aodh-bin\n                  mountPath: /tmp/aodh-alarms-cleaner.sh\n                  subPath: aodh-alarms-cleaner.sh\n                  readOnly: true\n                - name: pod-etc-aodh\n                  mountPath: /etc/aodh\n                - name: aodh-etc\n                  mountPath: /etc/aodh/aodh.conf\n                  subPath: aodh.conf\n                  readOnly: true\n                - name: aodh-etc-snippets\n                  mountPath: /etc/aodh/aodh.conf.d/\n                  readOnly: true\n                {{- if .Values.conf.aodh.DEFAULT.log_config_append }}\n                - name: aodh-etc\n                  mountPath: {{ .Values.conf.aodh.DEFAULT.log_config_append }}\n                  subPath: {{ base .Values.conf.aodh.DEFAULT.log_config_append }}\n                  readOnly: true\n                {{- end }}\n{{ if $mounts_aodh_alarms_cleaner.volumeMounts }}{{ toYaml $mounts_aodh_alarms_cleaner.volumeMounts | indent 16 }}{{ end }}\n          volumes:\n     
       - name: pod-tmp\n              emptyDir: {}\n            - name: pod-etc-aodh\n              emptyDir: {}\n            - name: aodh-etc\n              secret:\n                secretName: aodh-etc\n                defaultMode: 0444\n            - name: aodh-etc-snippets\n{{- if $etcSources }}\n              projected:\n                sources:\n{{ toYaml $etcSources | indent 18 }}\n{{- else }}\n              emptyDir: {}\n{{ end }}\n            - name: aodh-bin\n              configMap:\n                name: aodh-bin\n                defaultMode: 0555\n{{ if $mounts_aodh_alarms_cleaner.volumes }}{{ toYaml $mounts_aodh_alarms_cleaner.volumes | indent 12 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/deployment-api.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . }}\n\n{{- $mounts_aodh_api := .Values.pod.mounts.aodh_api.aodh_api }}\n{{- $mounts_aodh_api_init := .Values.pod.mounts.aodh_api.init_container }}\n\n{{- $serviceAccountName := \"aodh-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.aodh_api }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: aodh-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"aodh\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"aodh\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"aodh\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"aodh_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"aodh\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"aodh\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.aodh.enabled }}\n{{ tuple $envAll \"aodh\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_aodh_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: aodh-api\n{{ tuple $envAll \"aodh_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"aodh\" \"container\" \"aodh_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/aodh-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/aodh-api.sh\n                  - stop\n          ports:\n            - name: a-api\n              containerPort: {{ tuple \"alarming\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"alarming\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.aodh.oslo_concurrency.lock_path }}\n            - name: wsgi-aodh\n              mountPath: /var/www/cgi-bin/aodh\n            - name: pod-etc-aodh\n              mountPath: /etc/aodh\n            - name: aodh-etc\n              mountPath: /etc/aodh/aodh.conf\n              subPath: aodh.conf\n              readOnly: true\n            - name: aodh-etc-snippets\n              mountPath: /etc/aodh/aodh.conf.d/\n              readOnly: true\n            {{- if .Values.conf.aodh.DEFAULT.log_config_append }}\n            - name: aodh-etc\n              mountPath: {{ .Values.conf.aodh.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.aodh.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: aodh-etc\n              mountPath: /etc/aodh/api_paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: aodh-etc\n              mountPath: /etc/aodh/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: aodh-etc\n              mountPath: /etc/apache2/conf-enabled/wsgi-aodh.conf\n              subPath: wsgi-aodh.conf\n              readOnly: true\n            - name: aodh-bin\n              mountPath: /tmp/aodh-api.sh\n              subPath: aodh-api.sh\n              readOnly: true\n{{ if $mounts_aodh_api.volumeMounts }}{{ toYaml $mounts_aodh_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: wsgi-aodh\n 
         emptyDir: {}\n        - name: pod-etc-aodh\n          emptyDir: {}\n        - name: aodh-etc\n          secret:\n            secretName: aodh-etc\n            defaultMode: 0444\n        - name: aodh-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: aodh-bin\n          configMap:\n            name: aodh-bin\n            defaultMode: 0555\n{{ if $mounts_aodh_api.volumes }}{{ toYaml $mounts_aodh_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/deployment-evaluator.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_evaluator }}\n{{- $envAll := . }}\n\n{{- $mounts_aodh_evaluator := .Values.pod.mounts.aodh_evaluator.aodh_evaluator }}\n{{- $mounts_aodh_evaluator_init := .Values.pod.mounts.aodh_evaluator.init_container }}\n\n{{- $serviceAccountName := \"aodh-evaluator\" }}\n{{ tuple $envAll \"evaluator\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.aodh_evaluator }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: aodh-evaluator\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"aodh\" \"evaluator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.evaluator }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"aodh\" \"evaluator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"aodh\" \"evaluator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple 
\"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"aodh_evaluator\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"aodh\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"aodh\" \"evaluator\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.evaluator.node_selector_key }}: {{ .Values.labels.evaluator.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.aodh.enabled }}\n{{ tuple $envAll \"aodh\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"evaluator\" $mounts_aodh_evaluator_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: aodh-evaluator\n{{ tuple $envAll \"aodh_evaluator\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.evaluator | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"aodh\" \"container\" \"aodh_evaluator\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/aodh-evaluator.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/aodh-evaluator.sh\n                  - stop\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.aodh.oslo_concurrency.lock_path }}\n            - name: pod-etc-aodh\n  
            mountPath: /etc/aodh\n            - name: aodh-etc\n              mountPath: /etc/aodh/aodh.conf\n              subPath: aodh.conf\n              readOnly: true\n            - name: aodh-etc-snippets\n              mountPath: /etc/aodh/aodh.conf.d/\n              readOnly: true\n            {{- if .Values.conf.aodh.DEFAULT.log_config_append }}\n            - name: aodh-etc\n              mountPath: {{ .Values.conf.aodh.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.aodh.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: aodh-etc\n              mountPath: /etc/aodh/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: aodh-bin\n              mountPath: /tmp/aodh-evaluator.sh\n              subPath: aodh-evaluator.sh\n              readOnly: true\n{{ if $mounts_aodh_evaluator.volumeMounts }}{{ toYaml $mounts_aodh_evaluator.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-aodh\n          emptyDir: {}\n        - name: aodh-etc\n          secret:\n            secretName: aodh-etc\n            defaultMode: 0444\n        - name: aodh-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: aodh-bin\n          configMap:\n            name: aodh-bin\n            defaultMode: 0555\n{{ if $mounts_aodh_evaluator.volumes }}{{ toYaml $mounts_aodh_evaluator.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/deployment-listener.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_listener }}\n{{- $envAll := . }}\n\n{{- $mounts_aodh_listener := .Values.pod.mounts.aodh_listener.aodh_listener }}\n{{- $mounts_aodh_listener_init := .Values.pod.mounts.aodh_listener.init_container }}\n\n{{- $serviceAccountName := \"aodh-listener\" }}\n{{ tuple $envAll \"listener\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.aodh_listener }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: aodh-listener\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"aodh\" \"listener\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.listener }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"aodh\" \"listener\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"aodh\" \"listener\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple 
\"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"aodh_listener\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"aodh\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"aodh\" \"listener\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.listener.node_selector_key }}: {{ .Values.labels.listener.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.aodh.enabled }}\n{{ tuple $envAll \"aodh\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"listener\" $mounts_aodh_listener_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: aodh-listener\n{{ tuple $envAll \"aodh_listener\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.listener | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"aodh\" \"container\" \"aodh_listener\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/aodh-listener.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/aodh-listener.sh\n                  - stop\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.aodh.oslo_concurrency.lock_path }}\n            - name: pod-etc-aodh\n              
mountPath: /etc/aodh\n            - name: aodh-etc\n              mountPath: /etc/aodh/aodh.conf\n              subPath: aodh.conf\n              readOnly: true\n            - name: aodh-etc-snippets\n              mountPath: /etc/aodh/aodh.conf.d/\n              readOnly: true\n            {{- if .Values.conf.aodh.DEFAULT.log_config_append }}\n            - name: aodh-etc\n              mountPath: {{ .Values.conf.aodh.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.aodh.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: aodh-etc\n              mountPath: /etc/aodh/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: aodh-bin\n              mountPath: /tmp/aodh-listener.sh\n              subPath: aodh-listener.sh\n              readOnly: true\n{{ if $mounts_aodh_listener.volumeMounts }}{{ toYaml $mounts_aodh_listener.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-aodh\n          emptyDir: {}\n        - name: aodh-etc\n          secret:\n            secretName: aodh-etc\n            defaultMode: 0444\n        - name: aodh-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: aodh-bin\n          configMap:\n            name: aodh-bin\n            defaultMode: 0555\n{{ if $mounts_aodh_listener.volumes }}{{ toYaml $mounts_aodh_listener.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/deployment-notifier.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_notifier }}\n{{- $envAll := . }}\n\n{{- $mounts_aodh_notifier := .Values.pod.mounts.aodh_notifier.aodh_notifier }}\n{{- $mounts_aodh_notifier_init := .Values.pod.mounts.aodh_notifier.init_container }}\n\n{{- $serviceAccountName := \"aodh-notifier\" }}\n{{ tuple $envAll \"notifier\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.aodh_notifier }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: aodh-notifier\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"aodh\" \"notifier\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.notifier }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"aodh\" \"notifier\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"aodh\" \"notifier\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple 
\"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"aodh_notifier\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"aodh\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"aodh\" \"notifier\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.notifier.node_selector_key }}: {{ .Values.labels.notifier.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.aodh.enabled }}\n{{ tuple $envAll \"aodh\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"notifier\" $mounts_aodh_notifier_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: aodh-notifier\n{{ tuple $envAll \"aodh_notifier\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.notifier | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"aodh\" \"container\" \"aodh_notifier\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/aodh-notifier.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/aodh-notifier.sh\n                  - stop\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.aodh.oslo_concurrency.lock_path }}\n            - name: pod-etc-aodh\n              
mountPath: /etc/aodh\n            - name: aodh-etc\n              mountPath: /etc/aodh/aodh.conf\n              subPath: aodh.conf\n              readOnly: true\n            - name: aodh-etc-snippets\n              mountPath: /etc/aodh/aodh.conf.d/\n              readOnly: true\n            {{- if .Values.conf.aodh.DEFAULT.log_config_append }}\n            - name: aodh-etc\n              mountPath: {{ .Values.conf.aodh.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.aodh.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: aodh-etc\n              mountPath: /etc/aodh/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: aodh-bin\n              mountPath: /tmp/aodh-notifier.sh\n              subPath: aodh-notifier.sh\n              readOnly: true\n{{ if $mounts_aodh_notifier.volumeMounts }}{{ toYaml $mounts_aodh_notifier.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-aodh\n          emptyDir: {}\n        - name: aodh-etc\n          secret:\n            secretName: aodh-etc\n            defaultMode: 0444\n        - name: aodh-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: aodh-bin\n          configMap:\n            name: aodh-bin\n            defaultMode: 0555\n{{ if $mounts_aodh_notifier.volumes }}{{ toYaml $mounts_aodh_notifier.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "aodh/templates/ingress-api.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendServiceType\" \"alarming\" \"backendPort\" \"a-api\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/job-bootstrap.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"aodh\" \"keystoneUser\" .Values.bootstrap.ks_user -}}\n{{- if .Values.pod.tolerations.aodh.enabled -}}\n{{- $_ := set $bootstrapJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/job-db-drop.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"aodh\" -}}\n{{- if .Values.pod.tolerations.aodh.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/job-db-init.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"aodh\" -}}\n{{- if .Values.pod.tolerations.aodh.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/job-db-sync.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"aodh\" -}}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"aodh\" \"podVolMounts\" .Values.pod.mounts.aodh_db_sync.aodh_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.aodh_db_sync.aodh_db_sync.volumes -}}\n{{- if .Values.pod.tolerations.aodh.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"aodh\" -}}\n{{- if .Values.pod.tolerations.aodh.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"aodh\" \"serviceTypes\" ( tuple \"alarming\" ) -}}\n{{- if .Values.pod.tolerations.aodh.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/job-ks-service.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"aodh\" \"serviceTypes\" ( tuple \"alarming\" ) -}}\n{{- if .Values.pod.tolerations.aodh.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/job-ks-user.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"aodh\" -}}\n{{- if .Values.pod.tolerations.aodh.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/job-rabbit-init.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"aodh\" -}}\n{{- if .Values.pod.tolerations.aodh.enabled -}}\n{{- $_ := set $rmqUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"aodh\" -}}\n{{ $opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "aodh/templates/pdb-api.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: aodh-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"aodh\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/pod-aodh-test.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pod_aodh_test }}\n{{- $envAll := . }}\n\n{{- $mounts_tests := .Values.pod.mounts.aodh_tests.aodh_tests }}\n{{- $mounts_tests_init := .Values.pod.mounts.aodh_tests.init_container }}\n\n{{- $serviceAccountName := print $envAll.Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ print $envAll.Release.Name \"-test\" }}\n  labels:\n{{ tuple $envAll \"aodh\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  restartPolicy: Never\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.aodh.enabled }}\n{{ tuple $envAll \"aodh\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  serviceAccountName: {{ $serviceAccountName }}\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: {{ .Release.Name }}-test\n{{ tuple $envAll \"aodh_api\" | include \"helm-toolkit.snippets.image\" | indent 
6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n      command:\n        - /tmp/aodh-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: aodh-etc\n          mountPath: /etc/aodh/aodh.conf\n          subPath: aodh.conf\n          readOnly: true\n        - name: aodh-bin\n          mountPath: /tmp/aodh-test.sh\n          subPath: aodh-test.sh\n          readOnly: true\n{{ if $mounts_tests.volumeMounts }}{{ toYaml $mounts_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: aodh-etc\n      secret:\n        secretName: aodh-etc\n        defaultMode: 0444\n    - name: aodh-bin\n      configMap:\n        name: aodh-bin\n        defaultMode: 0555\n{{ if $mounts_tests.volumes }}{{ toYaml $mounts_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/secret-db.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"aodh\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  DB_CONNECTION: {{ tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"alarming\" ) }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/secret-keystone.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"aodh\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"aodh\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass \"http\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/service-api.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"alarming\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: a-api\n      port: {{ tuple \"alarming\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n      {{ end }}\n  selector:\n{{ tuple $envAll \"aodh\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "aodh/templates/service-ingress-api.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"alarming\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "aodh/values.yaml",
    "content": "# Copyright 2019 Wind River Systems, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for aodh.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  evaluator:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  listener:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  notifier:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\n\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    aodh_db_sync: quay.io/airshipit/aodh:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    aodh_api: quay.io/airshipit/aodh:2025.1-ubuntu_noble\n    aodh_evaluator: 
quay.io/airshipit/aodh:2025.1-ubuntu_noble\n    aodh_listener: quay.io/airshipit/aodh:2025.1-ubuntu_noble\n    aodh_notifier: quay.io/airshipit/aodh:2025.1-ubuntu_noble\n    aodh_alarms_cleaner: quay.io/airshipit/aodh:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\njobs:\n  alarms_cleaner:\n    # daily\n    cron: \"0 */24 * * *\"\n    history:\n      success: 3\n      failed: 1\n\npod:\n  security_context:\n    aodh:\n      pod:\n        runAsUser: 42402\n      container:\n        aodh_api:\n          runAsUser: 0\n        aodh_evaluator:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        aodh_notifier:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        aodh_listener:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    aodh:\n      enabled: false\n      tolerations:\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule\n  mounts:\n    aodh_api:\n      init_container: null\n      aodh_api:\n        volumeMounts:\n        volumes:\n    aodh_evaluator:\n      init_container: null\n      aodh_evaluator:\n        volumeMounts:\n        volumes:\n    aodh_listener:\n      init_container: null\n      aodh_listener:\n        volumeMounts:\n        volumes:\n    aodh_notifier:\n      init_container: null\n      aodh_notifier:\n        volumeMounts:\n        
volumes:\n    aodh_alarms_cleaner:\n      init_container: null\n      aodh_alarms_cleaner:\n        volumeMounts:\n        volumes:\n    aodh_bootstrap:\n      init_container: null\n      aodh_bootstrap:\n        volumeMounts:\n        volumes:\n    aodh_tests:\n      init_container: null\n      aodh_tests:\n        volumeMounts:\n        volumes:\n    aodh_db_sync:\n      aodh_db_sync:\n        volumeMounts:\n        volumes:\n  # -- This allows users to add Kubernetes Projected Volumes to be mounted at /etc/aodh/aodh.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/daemonset/cronjob\n  ## https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    aodh_api: []\n    aodh_evaluator: []\n    aodh_listener: []\n    aodh_notifier: []\n    aodh_alarms_cleaner: []\n    aodh_db_sync: []\n  replicas:\n    api: 1\n    evaluator: 1\n    listener: 1\n    notifier: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    evaluator:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    listener:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    notifier:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: 
\"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      alarms_cleaner:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    
external_policy_local: false\n    node_port:\n      enabled: false\n      port: 8042\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - aodh-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - aodh-db-sync\n        - aodh-ks-user\n        - aodh-ks-endpoints\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    evaluator:\n      jobs:\n        - aodh-db-sync\n        - aodh-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: alarming\n    listener:\n      jobs:\n        - aodh-db-sync\n        - aodh-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: alarming\n    notifier:\n      jobs:\n        - aodh-db-sync\n        - aodh-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: alarming\n    rabbit_init:\n      services:\n        - service: oslo_messaging\n          endpoint: internal\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - aodh-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    ks_endpoints:\n      jobs:\n        - aodh-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    
ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    tests:\n      jobs:\n        - aodh-db-sync\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: alarming\n\nconf:\n  wsgi_aodh: |\n    Listen 0.0.0.0:{{ tuple \"alarming\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n\n    LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" combined\n    LogFormat \"%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" proxy\n\n    SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n    CustomLog /dev/stdout combined env=!forwarded\n    CustomLog /dev/stdout proxy env=forwarded\n\n    <VirtualHost *:{{ tuple \"alarming\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}>\n        WSGIDaemonProcess aodh processes=2 threads=1 user=aodh group=aodh display-name=%{GROUP}\n        WSGIProcessGroup aodh\n        WSGIScriptAlias / /var/www/cgi-bin/aodh/aodh-api\n        WSGIApplicationGroup %{GLOBAL}\n        ErrorLogFormat \"%{cu}t %M\"\n        ErrorLog /dev/stdout\n        SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n        CustomLog /dev/stdout combined env=!forwarded\n        CustomLog /dev/stdout proxy env=forwarded\n    </VirtualHost>\n  paste:\n    composite:aodh+noauth:\n      use: egg:Paste#urlmap\n      /: aodhversions_pipeline\n      /v2: aodhv2_noauth_pipeline\n      /healthcheck: healthcheck\n    composite:aodh+keystone:\n      use: egg:Paste#urlmap\n      /: aodhversions_pipeline\n      /v2: aodhv2_keystone_pipeline\n      /healthcheck: healthcheck\n    app:healthcheck:\n      use: egg:oslo.middleware#healthcheck\n      oslo_config_project: aodh\n    pipeline:aodhversions_pipeline:\n      pipeline: cors http_proxy_to_wsgi aodhversions\n    app:aodhversions:\n      paste.app_factory: aodh.api.app:app_factory\n      root: aodh.api.controllers.root.VersionsController\n    pipeline:aodhv2_keystone_pipeline:\n      pipeline: cors http_proxy_to_wsgi request_id authtoken aodhv2\n    pipeline:aodhv2_noauth_pipeline:\n      pipeline: cors http_proxy_to_wsgi request_id aodhv2\n    app:aodhv2:\n      paste.app_factory: aodh.api.app:app_factory\n      root: aodh.api.controllers.v2.root.V2Controller\n    filter:authtoken:\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n      oslo_config_project: aodh\n    filter:request_id:\n      paste.filter_factory: oslo_middleware:RequestId.factory\n    filter:cors:\n      paste.filter_factory: oslo_middleware.cors:filter_factory\n      oslo_config_project: aodh\n    filter:http_proxy_to_wsgi:\n      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory\n      
oslo_config_project: aodh\n  policy: {}\n  aodh:\n    DEFAULT:\n      debug: false\n      log_config_append: /etc/aodh/logging.conf\n    oslo_middleware:\n      enable_proxy_headers_parsing: true\n    oslo_policy:\n      policy_file: /etc/aodh/policy.yaml\n    oslo_concurrency:\n      lock_path: /var/lock\n    database:\n      alarm_history_time_to_live: 86400\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    keystone_authtoken:\n      auth_version: v3\n      auth_type: password\n      memcache_security_strategy: ENCRYPT\n      service_type: alarming\n    service_credentials:\n      auth_type: password\n      interface: internal\n      auth_version: v3\n  logging:\n    loggers:\n      keys:\n        - root\n        - aodh\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_aodh:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: aodh\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: 
context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n    formatter_default:\n      format: \"%(message)s\"\n\nsecrets:\n  identity:\n    admin: aodh-keystone-admin\n    aodh: aodh-keystone-user\n  oslo_db:\n    admin: aodh-db-admin\n    aodh: aodh-db-user\n  oslo_messaging:\n    admin: aodh-rabbitmq-admin\n    aodh: aodh-rabbitmq-user\n  tls:\n    alarming:\n      api:\n        public: aodh-tls-public\n  oci_image_registry:\n    aodh: aodh-oci-image-registry\n\nbootstrap:\n  enabled: false\n  ks_user: aodh\n  script: |\n    openstack token issue\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      aodh:\n        username: aodh\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      aodh:\n        role: admin\n        region_name: RegionOne\n        username: aodh\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    
host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 80\n        internal: 5000\n  alarming:\n    name: aodh\n    hosts:\n      default: aodh-api\n      public: aodh\n    host_fqdn_override:\n      default: null\n      # NOTE: this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 8042\n        public: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n      aodh:\n        username: aodh\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /aodh\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_cache:\n    auth:\n      # NOTE: this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n      aodh:\n        username: aodh\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /aodh\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      
default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n\nnetwork_policy:\n  aodh:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  cron_job_alarms_cleaner: true\n  deployment_api: true\n  deployment_evaluator: true\n  deployment_listener: true\n  deployment_notifier: true\n  ingress_api: true\n  job_bootstrap: true\n  job_db_drop: false\n  job_db_init: true\n  job_image_repo_sync: true\n  job_rabbit_init: true\n  job_db_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  network_policy: false\n  pdb_api: true\n  pod_aodh_test: true\n  secret_db: true\n  secret_ingress_tls: true\n  secret_keystone: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_api: true\n  service_ingress_api: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "barbican/.helmignore",
    "content": "values_overrides\n"
  },
  {
    "path": "barbican/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Barbican\nname: barbican\nversion: 2025.2.0\nhome: https://docs.openstack.org/barbican/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Barbican/OpenStack_Project_Barbican_vertical.png\nsources:\n  - https://opendev.org/openstack/barbican\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "barbican/templates/bin/_barbican-test.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nopenstack secret list\n\n# Come up with a random payload\nPAYLOAD=`cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1`\necho $PAYLOAD\n\nSECRET=`openstack secret store --name mysecret --payload ${PAYLOAD} | awk ' /href/ {print $5}'`\n\nopenstack secret list\n\nopenstack secret get $SECRET\n\nopenstack secret get --payload $SECRET\n\nopenstack secret delete $SECRET\n\nopenstack secret list\n"
  },
  {
    "path": "barbican/templates/bin/_barbican.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec uwsgi --ini /etc/barbican/barbican-api-uwsgi.ini\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "barbican/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "barbican/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nbarbican-db-manage upgrade\n\n{{- $kek := (index (index .Values.conf.barbican \"simple_crypto_plugin\" | default dict) \"kek\") | default \"\" }}\n{{- $old_kek := index .Values.conf.simple_crypto_kek_rewrap \"old_kek\" | default \"\"}}\n{{- if and (not (empty $old_kek)) (not (empty $kek)) }}\nset +x\necho \"Ensuring that project KEKs are wrapped with the target global KEK\"\n/tmp/simple_crypto_kek_rewrap.py --old-keks=\"$(cat /tmp/old_keks)\"\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/bin/_simple_crypto_kek_rewrap.py.tpl",
    "content": "#!/usr/bin/env python\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#  Unless required by applicable law or agreed to in writing, software\n#  distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#  License for the specific language governing permissions and limitations\n#  under the License.\n\nimport argparse\nimport base64\nimport sys\n\nfrom cryptography import fernet\nfrom oslo_db.sqlalchemy import session\nfrom sqlalchemy import orm\nfrom sqlalchemy.orm import scoping\n\nfrom barbican.common import utils\nfrom barbican.model import models\nfrom barbican.plugin.crypto import simple_crypto\n\n# Use config values from simple_crypto\nCONF = simple_crypto.CONF\n\n\nclass KekRewrap(object):\n\n    def __init__(self, conf, old_keks):\n        self.dry_run = False\n        self.db_engine = session.create_engine(conf.database.connection or conf.sql_connection)\n        self._session_creator = scoping.scoped_session(\n            orm.sessionmaker(\n                bind=self.db_engine\n            )\n        )\n        self.crypto_plugin = simple_crypto.SimpleCryptoPlugin(conf)\n        self.plugin_name = utils.generate_fullname_for(self.crypto_plugin)\n\n        if hasattr(self.crypto_plugin, 'master_kek'):\n            self.encryptor = fernet.Fernet(self.crypto_plugin.master_kek)\n        else:\n            self.encryptor = fernet.MultiFernet(\n                [fernet.Fernet(x) for x in self.crypto_plugin.master_keys]\n            )\n        self.decryptor = fernet.MultiFernet(\n            [fernet.Fernet(x.encode('utf-8')) for x in old_keks]\n        )\n\n    def rewrap_kek(self, project, kek):\n        db_begin_fn = self.db_session.begin_nested if (\n            
self.db_session.in_transaction()) else self.db_session.begin\n        with db_begin_fn():\n\n            plugin_meta = kek.plugin_meta\n\n            # try to unwrap with the target kek, and if successful, skip\n            try:\n                if self.encryptor.decrypt(plugin_meta.encode('utf-8')):\n                    print('Project KEK {} is already wrapped with target KEK, skipping'.format(kek.id))\n                    return\n            except fernet.InvalidToken:\n                pass\n\n            # decrypt with the old kek\n            print('Unwrapping Project KEK {}'.format(kek.id))\n            try:\n                decrypted_plugin_meta = self.decryptor.decrypt(plugin_meta.encode('utf-8'))\n            except fernet.InvalidToken:\n                print('Failed to unwrap Project KEK {}'.format(kek.id))\n                raise\n\n            # encrypt with the new kek\n            print('Rewrapping Project KEK {}'.format(kek.id))\n            try:\n                new_plugin_meta = self.encryptor.encrypt(decrypted_plugin_meta).decode('utf-8')\n            except fernet.InvalidToken:\n                print('Failed to wrap Project KEK {}'.format(kek.id))\n                raise\n\n            if self.dry_run:\n                return\n\n            # Update KEK metadata in DB\n            print('Storing updated Project KEK {}'.format(kek.id))\n            kek.plugin_meta = new_plugin_meta\n\n    def get_keks_for_project(self, project):\n        keks = []\n        db_begin_fn = self.db_session.begin_nested if (\n            self.db_session.in_transaction()) else self.db_session.begin\n        with db_begin_fn() as transaction:\n            print('Retrieving KEKs for Project {}'.format(project.external_id))\n            query = transaction.session.query(models.KEKDatum)\n            query = query.filter_by(project_id=project.id)\n            query = query.filter_by(plugin_name=self.plugin_name)\n\n            keks = query.all()\n\n        return keks\n\n    
def get_projects(self):\n        print('Retrieving all available projects')\n\n        projects = []\n        db_begin_fn = self.db_session.begin_nested if (\n            self.db_session.in_transaction()) else self.db_session.begin\n        with db_begin_fn() as transaction:\n            projects = transaction.session.query(models.Project).all()\n\n        return projects\n\n    @property\n    def db_session(self):\n        return self._session_creator()\n\n    def execute(self, dry_run=True):\n        self.dry_run = dry_run\n        if self.dry_run:\n            print('-- Running in dry-run mode --')\n\n        projects = self.get_projects()\n        successes = []\n        failures = []\n\n        for project in projects:\n            keks = self.get_keks_for_project(project)\n            for kek in keks:\n                try:\n                    self.rewrap_kek(project, kek)\n                    successes.append(kek.id)\n                except Exception:\n                    failures.append(kek.id)\n\n        if successes:\n            print('Successfully processed the following KEKs:')\n            print('\\n'.join(successes))\n\n        if failures:\n            print('Failed to rewrap the following KEKs:')\n            print('\\n'.join(failures))\n            sys.exit(1)\n\n\ndef main():\n    script_desc = 'Utility to re-wrap Project KEKs after rotating the global KEK.'\n\n    parser = argparse.ArgumentParser(description=script_desc)\n    parser.add_argument(\n        '--dry-run',\n        action='store_true',\n        help='Displays changes that will be made (Non-destructive)'\n    )\n    parser.add_argument(\n        '--old-keks',\n        default=\"dGhpcnR5X3R3b19ieXRlX2tleWJsYWhibGFoYmxhaGg=\",\n        help='Old key encryption keys previously used by Simple Crypto Plugin. '\n             'A comma separated string of list contain keys '\n             '( with format 32 bytes and base64-encoded ). 
'\n             'First key in list is used for encrypting new data. '\n             'Additional keys used for decrypting existing data.'\n    )\n    args = parser.parse_args()\n\n    rewrapper = KekRewrap(CONF, args.old_keks.split(\",\"))\n    rewrapper.execute(args.dry_run)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "barbican/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.certificates -}}\n{{  dict \"envAll\" . \"service\" \"key_manager\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "barbican/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: barbican-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  barbican-test.sh: |\n{{ tuple \"bin/_barbican-test.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  barbican.sh: |\n{{ tuple \"bin/_barbican.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n  simple_crypto_kek_rewrap.py: |\n{{ tuple \"bin/_simple_crypto_kek_rewrap.py.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.barbican.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.barbican.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.barbican.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.barbican.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.barbican.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.barbican.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.barbican.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.barbican.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.barbican.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.barbican.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.barbican.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.barbican.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.barbican.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.barbican.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.barbican.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.barbican.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.barbican.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.barbican.keystone_authtoken \"username\" .Values.endpoints.identity.auth.barbican.username -}}\n{{- end -}}\n{{- if empty .Values.conf.barbican.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.barbican.keystone_authtoken \"password\" .Values.endpoints.identity.auth.barbican.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.barbican.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.barbican.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.barbican.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.barbican.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.barbican.database.connection)) (empty .Values.conf.barbican.database.connection) -}}\n{{- $connection := tuple \"oslo_db\" \"internal\" \"barbican\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.barbican.database \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.barbican.database \"connection\" $connection -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.barbican.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"barbican\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.barbican.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- $barbicanPath := index .Values \"endpoints\" \"key_manager\" \"path\" \"default\" }}\n{{- if empty .Values.conf.barbican.DEFAULT.host_href -}}\n{{- $_ := tuple \"key_manager\" \"public\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | trimSuffix $barbicanPath | set .Values.conf.barbican.DEFAULT \"host_href\" -}}\n{{- end -}}\n\n{{- if empty (index .Values.conf.barbican_api_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"key_manager\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.barbican_api_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .Release.Name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: barbican-etc\ntype: Opaque\ndata:\n  barbican.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.barbican | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  barbican-api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.paste | b64enc }}\n  api_audit_map.conf: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.audit_map | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n  barbican-api-uwsgi.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.barbican_api_uwsgi | b64enc }}\n  old_keks: {{ index .Values.conf.simple_crypto_kek_rewrap \"old_kek\" | default \"\" | b64enc | quote }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"probeTemplate\" }}\n{{- $health_path := tuple \"key_manager\" \"healthcheck\" \"internal\" . | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" }}\nhttpGet:\n  scheme: {{ tuple \"key_manager\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: {{ $health_path }}\n  port: {{ tuple \"key_manager\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . }}\n\n{{- $mounts_barbican_api := .Values.pod.mounts.barbican_api.barbican_api }}\n{{- $mounts_barbican_api_init := .Values.pod.mounts.barbican_api.init_container }}\n{{- $etcSources := .Values.pod.etcSources.barbican_api }}\n\n{{- $serviceAccountName := \"barbican-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: barbican-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"barbican\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"barbican\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"barbican\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"barbican_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"barbican-api\" \"containerNames\" (list \"init\" \"barbican-api\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"barbican\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"barbican_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"barbican_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"barbican\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.barbican.enabled }}\n{{ tuple $envAll \"barbican\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_barbican_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: barbican-api\n{{ tuple $envAll \"barbican_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"barbican\" \"container\" \"barbican_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"barbican-api\" \"type\" \"readiness\" \"probeTemplate\" (include \"probeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"barbican-api\" \"type\" \"liveness\" \"probeTemplate\" (include \"probeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/barbican.sh\n            - start\n          env:\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/barbican/certs/ca.crt\"\n{{- end }}\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/barbican.sh\n                  - stop\n          ports:\n            - name: b-api\n              containerPort: {{ tuple \"key_manager\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.barbican.oslo_concurrency.lock_path }}\n            - name: etcbarbican\n              mountPath: /etc/barbican\n            - name: barbican-etc\n              mountPath: /etc/barbican/barbican-api-uwsgi.ini\n              subPath: barbican-api-uwsgi.ini\n              readOnly: true\n            - name: barbican-etc\n              mountPath: /etc/barbican/barbican.conf\n              subPath: barbican.conf\n              readOnly: true\n            - name: barbican-etc-snippets\n              mountPath: /etc/barbican/barbican.conf.d/\n              readOnly: true\n            {{- if .Values.conf.barbican.DEFAULT.log_config_append }}\n            - name: barbican-etc\n              mountPath: {{ .Values.conf.barbican.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.barbican.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: barbican-etc\n              mountPath: /etc/barbican/api_audit_map.conf\n              subPath: api_audit_map.conf\n              readOnly: true\n            - name: barbican-etc\n              mountPath: /etc/barbican/barbican-api-paste.ini\n              subPath: barbican-api-paste.ini\n              readOnly: true\n            - name: barbican-etc\n              mountPath: /etc/barbican/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: barbican-bin\n              mountPath: /tmp/barbican.sh\n              subPath: barbican.sh\n              readOnly: true\n{{- dict \"enabled\" .Values.tls.oslo_db \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" .Values.tls.identity \"name\" .Values.secrets.tls.key_manager.api.internal \"path\" \"/etc/barbican/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.tls.oslo_messaging \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n\n{{ if $mounts_barbican_api.volumeMounts }}{{ toYaml $mounts_barbican_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: etcbarbican\n          emptyDir: {}\n        - name: barbican-etc\n          secret:\n            secretName: barbican-etc\n            defaultMode: 0444\n        - name: barbican-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: barbican-bin\n          configMap:\n            name: barbican-bin\n            defaultMode: 0555\n{{- dict \"enabled\" .Values.tls.oslo_db \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" .Values.tls.identity \"name\" .Values.secrets.tls.key_manager.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.tls.oslo_messaging \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n\n{{ if $mounts_barbican_api.volumes }}{{ toYaml $mounts_barbican_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "barbican/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendServiceType\" \"key_manager\" \"backendPort\" \"b-api\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.bootstrap\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"5\"\n{{- end }}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"barbican\" \"keystoneUser\" .Values.bootstrap.ks_user \"logConfigFile\" .Values.conf.barbican.DEFAULT.log_config_append \"jobAnnotations\" (include \"metadata.annotations.job.bootstrap\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.barbican.enabled -}}\n{{- $_ := set $bootstrapJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $bootstrapJob \"tlsSecret\" .Values.secrets.tls.key_manager.api.internal -}}\n{{- end -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"barbican\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbDropJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.barbican.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"barbican\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.barbican.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- $podVolMounts := .Values.pod.mounts.barbican_db_sync.barbican_db_sync.volumeMounts | default list }}\n{{- $podVolMounts = append $podVolMounts (dict \"name\" \"db-sync-sh\" \"mountPath\" \"/tmp/simple_crypto_kek_rewrap.py\" \"subPath\" \"simple_crypto_kek_rewrap.py\" \"readOnly\" true) }}\n{{- $podVolMounts = append $podVolMounts (dict \"name\" \"db-sync-conf\" \"mountPath\" \"/tmp/old_keks\" \"subPath\" \"old_keks\" \"readOnly\" true) }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"barbican\" \"podVolMounts\" $podVolMounts \"podVols\" .Values.pod.mounts.barbican_db_sync.barbican_db_sync.volumes \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbSyncJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.barbican.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"barbican\" \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.barbican.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"barbican\" \"serviceTypes\" ( tuple \"key-manager\" ) \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.barbican.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.key_manager.api.internal -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"barbican\" \"serviceTypes\" ( tuple \"key-manager\" ) \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.barbican.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.key_manager.api.internal -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"barbican\" \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.barbican.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.key_manager.api.internal -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"barbican\" \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) -}}\n{{- if and .Values.tls.oslo_messaging .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal -}}\n{{- $_ := set $rmqUserJob \"tlsSecret\" .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.barbican.enabled -}}\n{{- $_ := set $rmqUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/network_policy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"barbican\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "barbican/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: barbican-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"barbican\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/pod-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pod_test }}\n{{- $envAll := . }}\n{{- $dependencies := .Values.dependencies.static.tests }}\n\n{{- $mounts_barbican_tests := .Values.pod.mounts.barbican_tests.barbican_tests }}\n{{- $mounts_barbican_tests_init := .Values.pod.mounts.barbican_tests.init_container }}\n\n{{- $serviceAccountName := print .Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{.Release.Name}}-test\"\n  labels:\n{{ tuple $envAll \"barbican\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"barbican-test\" \"containerNames\" (list \"init\" \"barbican-test\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n{{ tuple \"barbican_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 2 }}\n{{ tuple \"barbican_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.barbican.enabled }}\n{{ tuple $envAll \"barbican\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  restartPolicy: Never\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_barbican_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: barbican-test\n{{ tuple $envAll \"scripted_test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"barbican_test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n      command:\n        - /tmp/barbican-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: barbican-bin\n          mountPath: /tmp/barbican-test.sh\n          subPath: barbican-test.sh\n          readOnly: true\n{{ if $mounts_barbican_tests.volumeMounts }}{{ toYaml $mounts_barbican_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: barbican-bin\n      configMap:\n        name: barbican-bin\n        defaultMode: 0555\n{{ if $mounts_barbican_tests.volumes }}{{ toYaml $mounts_barbican_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"barbican\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"key_manager\" ) }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"barbican\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"barbican\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass \"http\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"key-manager\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: b-api\n    port: {{ tuple \"key-manager\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n    nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"barbican\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "barbican/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"key-manager\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "barbican/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for barbican.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nrelease_group: null\n\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    scripted_test: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    barbican_db_sync: quay.io/airshipit/barbican:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    barbican_api: quay.io/airshipit/barbican:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\npod:\n  
security_context:\n    barbican:\n      pod:\n        runAsUser: 42424\n      container:\n        barbican_api:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    test:\n      pod:\n        runAsUser: 42424\n      container:\n        barbican_test:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    barbican:\n      enabled: false\n      tolerations:\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule\n  mounts:\n    barbican_api:\n      init_container: null\n      barbican_api:\n        volumeMounts:\n        volumes:\n    barbican_bootstrap:\n      init_container: null\n      barbican_bootstrap:\n        volumeMounts:\n        volumes:\n    barbican_tests:\n      init_container: null\n      barbican_tests:\n        volumeMounts:\n        volumes:\n    barbican_db_sync:\n      barbican_db_sync:\n        volumeMounts:\n        volumes:\n  # -- This allows users to add Kubernetes Projected Volumes to be mounted at /etc/barbican/barbican.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/daemonset/cronjob\n  ## https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    barbican_api: []\n    barbican_db_sync: []\n  replicas:\n    api: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n  probes:\n    api:\n      barbican-api:\n        
readiness:\n          enabled: true\n          params:\n            periodSeconds: 10\n            timeoutSeconds: 5\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 5\n            periodSeconds: 10\n            timeoutSeconds: 5\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: 
\"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30486\n\nnetwork_policy:\n  barbican:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nbootstrap:\n  enabled: false\n  ks_user: barbican\n  script: |\n    openstack token issue\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - barbican-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - barbican-db-sync\n        - barbican-ks-user\n        - barbican-ks-endpoints\n        - barbican-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - barbican-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    ks_endpoints:\n      jobs:\n        - barbican-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n        - 
endpoint: internal\n          service: oslo_messaging\n\nconf:\n  paste:\n    composite:main:\n      use: egg:Paste#urlmap\n      /: barbican_version\n      /v1: barbican-api-keystone\n      /healthcheck: healthcheck\n    pipeline:barbican_version:\n      pipeline: cors http_proxy_to_wsgi versionapp\n    pipeline:barbican_api:\n      pipeline: cors http_proxy_to_wsgi unauthenticated-context apiapp\n    pipeline:barbican-profile:\n      pipeline: cors http_proxy_to_wsgi unauthenticated-context egg:Paste#cgitb egg:Paste#httpexceptions profile apiapp\n    pipeline:barbican-api-keystone:\n      pipeline: cors http_proxy_to_wsgi authtoken context apiapp\n    pipeline:barbican-api-keystone-audit:\n      pipeline: http_proxy_to_wsgi authtoken context audit apiapp\n    app:apiapp:\n      paste.app_factory: barbican.api.app:create_main_app\n    app:versionapp:\n      paste.app_factory: barbican.api.app:create_version_app\n    app:healthcheck:\n      paste.app_factory: oslo_middleware:Healthcheck.app_factory\n      backends: disable_by_file\n      disable_by_file_path: /etc/barbican/healthcheck_disable\n    filter:simple:\n      paste.filter_factory: barbican.api.middleware.simple:SimpleFilter.factory\n    filter:unauthenticated-context:\n      paste.filter_factory: barbican.api.middleware.context:UnauthenticatedContextMiddleware.factory\n    filter:context:\n      paste.filter_factory: barbican.api.middleware.context:ContextMiddleware.factory\n    filter:audit:\n      paste.filter_factory: keystonemiddleware.audit:filter_factory\n      audit_map_file: /etc/barbican/api_audit_map.conf\n    filter:authtoken:\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n    filter:profile:\n      use: egg:repoze.profile\n      log_filename: myapp.profile\n      cachegrind_filename: cachegrind.out.myapp\n      discard_first_request: true\n      path: /__profile__\n      flush_at_shutdown: true\n      unwind: false\n    filter:cors:\n      paste.filter_factory: 
oslo_middleware.cors:filter_factory\n      oslo_config_project: barbican\n    filter:http_proxy_to_wsgi:\n      paste.filter_factory: oslo_middleware:HTTPProxyToWSGI.factory\n  policy: {}\n  audit_map:\n    DEFAULT:\n      # default target endpoint type\n      # should match the endpoint type defined in service catalog\n      target_endpoint_type: key-manager\n    custom_actions:\n      # map urls ending with specific text to a unique action\n      # Don't need custom mapping for other resource operations\n      # Note: action should match action names defined in CADF taxonomy\n      acl/get: read\n    path_keywords:\n      # path of api requests for CADF target typeURI\n      # Just need to include top resource path to identify class of resources\n      secrets: null\n      containers: null\n      orders: null\n      cas: \"None\"\n      quotas: null\n      project-quotas: null\n    service_endpoints:\n      # map endpoint type defined in service catalog to CADF typeURI\n      key-manager: service/security/keymanager\n  barbican_api_uwsgi:\n    uwsgi:\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      procname-prefix-spaced: \"barbican-api:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      wsgi-file: /var/lib/openstack/bin/barbican-wsgi-api\n      processes: 1\n      stats: 0.0.0.0:1717\n      stats-http: true\n  barbican:\n    DEFAULT:\n      transport_url: null\n      log_config_append: /etc/barbican/logging.conf\n    keystone_authtoken:\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n      memcache_secret_key: null\n      service_type: key-manager\n    database:\n      max_retries: -1\n      # -- Database 
connection URI. When empty the URI is auto-generated\n    ## from endpoints.oslo_db. Set to null to disable auto-generation,\n    ## e.g. when using an operator such as mariadb-operator that supplies\n    ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    barbican_api:\n      # NOTE(portdirect): the bind port should not be defined, and is manipulated\n      # via the endpoints section.\n      bind_port: null\n    oslo_policy:\n      policy_file: /etc/barbican/policy.yaml\n    oslo_concurrency:\n      lock_path: /var/lock\n    # When using the simple_crypto_plugin, a kek must be provided as:\n    #   .conf.barbican.simple_crypto_plugin.kek\n    # If no kek is provided, barbican will use a well-known default.\n    # If upgrading the chart with a new kek, the old kek must be provided in:\n    #   .conf.simple_crypto_kek_rewrap.old_kek\n    # Please refer to the .conf.simple_crypto_kek_rewrap section below.\n    # The barbican defaults are included here as a reference:\n    #   secretstore:\n    #     enabled_secretstore_plugins:\n    #       - store_crypto\n    #   crypto:\n    #     enabled_crypto_plugins:\n    #       - simple_crypto\n    #   simple_crypto_plugin:\n    #     # The kek should be a 32-byte value which is base64 encoded.\n    #     # First key is used for encrypting new data\n    #     kek: \"dGhpcnR5X3R3b19ieXRlX2tleWJsYWhibGFoYmxhaGg=\"\n    #     # Additional keys used for decrypting existing data\n    #     kek: \"xCDpcnR5X3R3b19ieXRlX2tleWJsYWhibGFoYmxhaGg=\"\n  # KEK rotation for the simple_crypto plugin\n  simple_crypto_kek_rewrap:\n    # To allow for chart upgrades when modifying the Key Encryption Keys, the\n    # db-sync job can rewrap the existing project keys with the new kek, leaving\n    # each secret’s encrypted data unchanged.\n\n    # This feature is enabled automatically, if a kek is specified at:\n    #   .conf.barbican.simple_crypto_plugin.kek\n    # and the previous kek is also 
specified at:\n    #   .conf.simple_crypto_kek_rewrap.old_kek\n\n    # The project keys are decrypted with 'old_kek' and re-encrypted with the\n    # target kek (as defined in barbican.conf).\n    # This resembles the lightweight rotation described here, which was never\n    # implemented for the simple crypto plugin:\n    # https://specs.openstack.org/openstack/barbican-specs/specs/liberty/add-crypto-mkek-rotation-support-lightweight.html\n\n    # The KEK value \"dGhpcnR5X3R3b19ieXRlX2tleWJsYWhibGFoYmxhaGg=\" matches the\n    # plugin default, and is retained here for convenience, in case the chart was\n    # previously installed without explicitly specifying a kek.\n    # old_kek allows comma-separated string for keks\n    # old_kek:\n    #   # First key is used for encrypting new data\n    #   # Additional keys used for decrypting existing data\n    #   - \"dGhpcnR5X3R3b19ieXRlX2tleWJsYWhibGFoYmxhaGg=,dDDpcnR5X3R3b19ieXRlX2tleWJsYWhibGFoYmxhaGg=\"\n    old_kek: \"dGhpcnR5X3R3b19ieXRlX2tleWJsYWhibGFoYmxhaGg=\"\n  logging:\n    loggers:\n      keys:\n        - root\n        - barbican\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_barbican:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: barbican\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    
handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: barbican-keystone-admin\n    barbican: barbican-keystone-user\n  oslo_db:\n    admin: barbican-db-admin\n    barbican: barbican-db-user\n  oslo_messaging:\n    admin: barbican-rabbitmq-admin\n    barbican: barbican-rabbitmq-user\n  tls:\n    key_manager:\n      api:\n        public: barbican-tls-public\n        internal: barbican-tls-internal\n  oci_image_registry:\n    barbican: barbican-oci-image-registry\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      barbican:\n        username: barbican\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      barbican:\n        role: admin\n        region_name: RegionOne\n        username: barbican\n        password: password\n        project_name: service\n        user_domain_name: 
service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  key_manager:\n    name: barbican\n    hosts:\n      default: barbican-api\n      public: barbican\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: barbican-tls-internal\n          issuerRef:\n            kind: ClusterIssuer\n            name: ca-clusterissuer\n    path:\n      default: /\n      healthcheck: /healthcheck\n    scheme:\n      default: http\n      service: http\n    port:\n      api:\n        default: 9311\n        public: 80\n        service: 9311\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      barbican:\n        username: barbican\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /barbican\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n        secret:\n          tls:\n            internal: rabbitmq-tls-direct\n      barbican:\n        username: barbican\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /barbican\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all 
services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress\n  # They are used to enable the Egress K8s network policy.\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns:\n        default: 53\n        protocol: UDP\n  ingress:\n    namespace: null\n    name: ingress\n    hosts:\n      default: ingress\n    port:\n      ingress:\n        default: 80\n\ntls:\n  identity: false\n  oslo_messaging: false\n  oslo_db: false\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  deployment_api: true\n  ingress_api: true\n  job_bootstrap: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_image_repo_sync: true\n  job_rabbit_init: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  pdb_api: true\n  pod_test: true\n  secret_db: true\n  network_policy: false\n  secret_ingress_tls: true\n  secret_keystone: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_ingress_api: true\n  service_api: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  
#     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "bindep.txt",
    "content": "# This file facilitates OpenStack-CI package installation\n# before the execution of any tests.\n\n# Required to build language docs\ngettext\n"
  },
  {
    "path": "blazar/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack Resource Reservation Service\nname: blazar\ntype: application\nversion: 2025.2.0\nhome: https://docs.openstack.org/blazar/\nicon: https://openmetal.io/wp-content/uploads/2024/10/OpenStack_Project_Blazar-300x300.jpg\nsources:\n  - https://opendev.org/openstack/blazar\nkeywords:\n  - openstack\n  - reservation\n  - helm\nmaintainers:\n  - name: OpenStack Helm Team\n    email: openstack-helm@lists.openstack.org\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "blazar/templates/bin/_blazar_api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec blazar-api \\\n        --config-file /etc/blazar/blazar.conf\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "blazar/templates/bin/_blazar_manager.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec blazar-manager \\\n  --config-file /etc/blazar/blazar.conf\n"
  },
  {
    "path": "blazar/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nblazar-db-manage \\\n  --config-file /etc/blazar/blazar.conf \\\n  upgrade head\n"
  },
  {
    "path": "blazar/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: blazar-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  rally-test.sh: |\n{{ tuple .Values.conf.rally_tests | include \"helm-toolkit.scripts.rally_test\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  blazar-api.sh: |\n{{ tuple \"bin/_blazar_api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  blazar-manager.sh: |\n{{ tuple \"bin/_blazar_manager.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n{{- if .Values.manifests.job_rabbit_init }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n{{- end }}\n{{- end }}\n...\n"
  },
  {
    "path": "blazar/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- define \"blazar.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if empty .Values.conf.blazar.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.blazar.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n{{- if empty .Values.conf.blazar.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.blazar.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.blazar.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.blazar.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.blazar.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.blazar.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.blazar.database.connection)) (empty .Values.conf.blazar.database.connection) -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"blazar\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\"| set .Values.conf.blazar.database \"connection\" -}}\n{{- end -}}\n{{- if empty .Values.conf.blazar.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"blazar\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.blazar.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .Release.Name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging 
\"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n{{- if empty .Values.conf.blazar.oslo_messaging_notifications.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"blazar\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.blazar.oslo_messaging_notifications \"transport_url\" -}}\n{{- end -}}\n\n{{/*\nOpenstack auth\n*/}}\n{{- if empty .Values.conf.blazar.DEFAULT.os_auth_host -}}\n{{- $_ := tuple \"identity\" \"internal\" . | include \"helm-toolkit.endpoints.endpoint_host_lookup\" | set .Values.conf.blazar.DEFAULT \"os_auth_host\" -}}\n{{- end -}}\n{{- if empty .Values.conf.blazar.DEFAULT.os_auth_port -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.blazar.DEFAULT \"os_auth_port\" -}}\n{{- end -}}\n{{- if empty .Values.conf.blazar.DEFAULT.os_auth_protocol -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | set .Values.conf.blazar.DEFAULT \"os_auth_protocol\" -}}\n{{- end -}}\n{{- if empty .Values.conf.blazar.DEFAULT.os_region_name -}}\n{{- $_ := set .Values.conf.blazar.DEFAULT \"os_region_name\" .Values.endpoints.identity.auth.admin.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.blazar.DEFAULT.os_admin_project_name -}}\n{{- $_ := set .Values.conf.blazar.DEFAULT \"os_admin_project_name\" .Values.endpoints.identity.auth.admin.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.blazar.DEFAULT.os_admin_project_domain_name -}}\n{{- $_ := set .Values.conf.blazar.DEFAULT \"os_admin_project_domain_name\" .Values.endpoints.identity.auth.admin.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.blazar.DEFAULT.os_admin_user_domain_name -}}\n{{- $_ := set .Values.conf.blazar.DEFAULT \"os_admin_user_domain_name\" .Values.endpoints.identity.auth.admin.user_domain_name -}}\n{{- end -}}\n{{- if empty 
.Values.conf.blazar.DEFAULT.os_admin_username -}}\n{{- $_ := set .Values.conf.blazar.DEFAULT \"os_admin_username\" .Values.endpoints.identity.auth.admin.username -}}\n{{- end -}}\n{{- if empty .Values.conf.blazar.DEFAULT.os_admin_password -}}\n{{- $_ := set .Values.conf.blazar.DEFAULT \"os_admin_password\" .Values.endpoints.identity.auth.admin.password -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $configMapName }}\ntype: Opaque\ndata:\n  rally_tests.yaml: {{ toYaml .Values.conf.rally_tests.tests | b64enc }}\n  blazar.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.blazar | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.api_paste | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n{{- end }}\n{{- end }}\n{{- if .Values.manifests.configmap_etc }}\n{{- list \"blazar-etc\" . | include \"blazar.configmap.etc\" }}\n{{- end }}\n...\n"
  },
  {
    "path": "blazar/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"blazarApiLivenessProbeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"reservation\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- define \"blazarApiReadinessProbeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"reservation\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . 
}}\n\n{{- $mounts_blazar_api := .Values.pod.mounts.blazar_api.blazar_api }}\n{{- $mounts_blazar_api_init := .Values.pod.mounts.blazar_api.init_container }}\n\n{{- $serviceAccountName := \"blazar-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: blazar-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"blazar\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"blazar\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"blazar\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"blazar_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"blazar-api\" \"containerNames\" (list \"blazar-api\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"blazar_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"blazar_api\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"blazar\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"blazar\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.blazar.enabled }}\n{{ tuple $envAll \"blazar\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_blazar_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: blazar-api\n{{ tuple $envAll \"blazar_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"blazar\" \"container\" \"blazar_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n          env:\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/blazar/certs/ca.crt\"\n{{- end }}\n          command:\n            - /tmp/blazar-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/blazar-api.sh\n                  - stop\n          ports:\n            - name: b-api\n              containerPort: {{ tuple \"reservation\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"default\" \"type\" \"liveness\" \"probeTemplate\" (include \"blazarApiLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"default\" \"type\" \"readiness\" \"probeTemplate\" (include \"blazarApiReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.blazar.oslo_concurrency.lock_path }}\n            - name: pod-etc-blazar\n              mountPath: /etc/blazar\n            - name: blazar-bin\n              mountPath: /tmp/blazar-api.sh\n              subPath: blazar-api.sh\n              readOnly: true\n            - name: blazar-etc\n              mountPath: /etc/blazar/blazar.conf\n              subPath: blazar.conf\n              readOnly: true\n            - name: blazar-etc-snippets\n              mountPath: /etc/blazar/blazar.conf.d/\n              readOnly: true\n            {{- if .Values.conf.blazar.DEFAULT.log_config_append }}\n            - name: blazar-etc\n              mountPath: {{ .Values.conf.blazar.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.blazar.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: blazar-etc\n              mountPath: /etc/blazar/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: blazar-etc\n              mountPath: /etc/blazar/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" 
\"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.reservation.api.internal \"path\" \"/etc/blazar/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_blazar_api.volumeMounts }}{{ toYaml $mounts_blazar_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-blazar\n          emptyDir: {}\n        - name: blazar-bin\n          configMap:\n            name: blazar-bin\n            defaultMode: 0555\n        - name: blazar-etc\n          secret:\n            secretName: blazar-etc\n            defaultMode: 0444\n        - name: blazar-etc-snippets\n          projected:\n            sources:\n              - secret:\n                  name: blazar-ks-etc\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.reservation.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_blazar_api.volumes }}{{ toYaml $mounts_blazar_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n...\n"
  },
  {
    "path": "blazar/templates/deployment-manager.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_manager }}\n{{- $envAll := . }}\n\n{{- $mounts := .Values.pod.mounts.blazar_manager.blazar_manager }}\n{{- $mounts_init := .Values.pod.mounts.blazar_manager.init_container }}\n\n{{- $serviceAccountName := \"blazar-manager\" }}\n{{ tuple $envAll \"manager\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: blazar-manager\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"blazar\" \"manager\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.manager }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"blazar\" \"manager\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"blazar\" \"manager\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"blazar-manager\" \"containerNames\" (list \"blazar-manager\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"blazar\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"blazar\" \"manager\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.manager.node_selector_key }}: {{ .Values.labels.manager.node_selector_value | quote }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.manager.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"manager\" $mounts_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: blazar-manager\n{{ tuple $envAll \"blazar_manager\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.manager | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"blazar\" \"container\" \"blazar_manager\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/blazar-manager.sh\n          env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.blazar }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.blazar.oslo_concurrency.lock_path }}\n            - name: pod-etc-blazar\n              mountPath: /etc/blazar\n            - name: 
blazar-bin\n              mountPath: /tmp/blazar-manager.sh\n              subPath: blazar-manager.sh\n              readOnly: true\n            - name: blazar-etc\n              mountPath: /etc/blazar/blazar.conf\n              subPath: blazar.conf\n              readOnly: true\n            {{- if .Values.conf.blazar.DEFAULT.log_config_append }}\n            - name: blazar-etc\n              mountPath: {{ .Values.conf.blazar.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.blazar.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: blazar-etc\n              mountPath: /etc/blazar/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{- if $mounts.volumeMounts }}\n{{ toYaml $mounts.volumeMounts | indent 12 }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-blazar\n          emptyDir: {}\n        - name: blazar-bin\n          configMap:\n            name: blazar-bin\n            defaultMode: 0555\n        - name: blazar-etc\n          secret:\n            secretName: blazar-etc\n            defaultMode: 0444\n{{- if $mounts.volumes }}\n{{ toYaml $mounts.volumes | indent 8 }}\n{{- end }}\n{{- end }}\n...\n"
  },
  {
    "path": "blazar/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "blazar/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"api\" \"backendServiceType\" \"reservation\" \"backendPort\" \"b-api\" -}}\n{{- $secretName := index $envAll.Values.secrets.tls.reservation.api ($envAll.Values.network.api.ingress.classes.namespace | replace \"-\" \"_\") -}}\n{{- if $envAll.Values.tls.identity -}}\n{{- $_ := set $ingressOpts \"certIssuer\" $envAll.Values.endpoints.identity.auth.blazar.tls.ca -}}\n{{- end -}}\n{{- if hasKey $envAll.Values.secrets.tls.reservation.api $envAll.Values.network.api.ingress.classes.namespace -}}\n{{- $_ := set $ingressOpts \"tlsSecret\" $secretName -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "blazar/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"blazar\" \"keystoneUser\" .Values.bootstrap.ks_user -}}\n{{- if .Values.pod.tolerations.blazar.enabled -}}\n{{- $_ := set $bootstrapJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "blazar/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"blazar\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbDropJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.blazar.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "blazar/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"blazar\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.blazar.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "blazar/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"blazar\" \"podVolMounts\" .Values.pod.mounts.blazar_db_sync.blazar_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.blazar_db_sync.blazar_db_sync.volumes -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbSyncJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.blazar.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "blazar/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksEndpointsJob := dict \"envAll\" . \"serviceName\" \"blazar\" \"serviceTypes\" ( tuple \"reservation\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksEndpointsJob \"tlsSecret\" .Values.secrets.tls.reservation.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksEndpointsJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.blazar.enabled -}}\n{{- $_ := set $ksEndpointsJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksEndpointsJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "blazar/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"blazar\" \"serviceTypes\" ( tuple \"reservation\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.reservation.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.blazar.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "blazar/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"blazar\" -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.reservation.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.blazar.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "blazar/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"blazar\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $rmqUserJob \"tlsSecret\" .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $rmqUserJob \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.blazar.enabled -}}\n{{- $_ := set $rmqUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "blazar/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: blazar-api\n  labels:\n{{ tuple $envAll \"blazar\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n{{- if .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n{{- else }}\n  maxUnavailable: {{ .Values.pod.lifecycle.disruption_budget.api.max_unavailable | default 1 }}\n{{- end }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"blazar\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n...\n"
  },
  {
    "path": "blazar/templates/pdb-manager.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_manager }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: blazar-manager\n  labels:\n{{ tuple $envAll \"blazar\" \"manager\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n{{- if .Values.pod.lifecycle.disruption_budget.manager.min_available }}\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.manager.min_available }}\n{{- else }}\n  maxUnavailable: {{ .Values.pod.lifecycle.disruption_budget.manager.max_unavailable | default 1 }}\n{{- end }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"blazar\" \"manager\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n...\n"
  },
  {
    "path": "blazar/templates/pod-rally-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.pod_rally_test }}\n{{- $envAll := . }}\n\n{{- $mounts_blazar_tests := .Values.pod.mounts.blazar_tests.blazar_tests }}\n{{- $mounts_blazar_tests_init := .Values.pod.mounts.blazar_tests.init_container }}\n\n{{- $serviceAccountName := print $envAll.deployment_name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ print $envAll.Chart.Name \"-test\" }}\n  labels:\n{{ tuple $envAll \"blazar\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"blazar-test\" \"containerNames\" (list \"init\" \"blazar-test\" \"blazar-test-ks-user\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n{{ if $envAll.Values.pod.tolerations.blazar.enabled }}\n{{ tuple $envAll \"blazar\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ 
.Values.labels.test.node_selector_value }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  restartPolicy: Never\n{{ tuple \"blazar_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 2 }}\n{{ tuple \"blazar_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_blazar_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n    - name: blazar-test-ks-user\n{{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"blazar_test_ks_user\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      command:\n        - /tmp/ks-user.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: blazar-bin\n          mountPath: /tmp/ks-user.sh\n          subPath: ks-user.sh\n          readOnly: true\n{{ dict \"enabled\" .Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.reservation.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 8 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_SERVICE_NAME\n          value: \"test\"\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_ROLE\n          value: {{ 
.Values.endpoints.identity.auth.test.role | quote }}\n  containers:\n    - name: blazar-test\n{{ tuple $envAll \"test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"blazar_test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: RALLY_ENV_NAME\n          value: {{.Chart.Name}}\n      command:\n        - /tmp/rally-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: blazar-etc\n          mountPath: /etc/rally/rally_tests.yaml\n          subPath: rally_tests.yaml\n          readOnly: true\n        - name: blazar-bin\n          mountPath: /tmp/rally-test.sh\n          subPath: rally-test.sh\n          readOnly: true\n        - name: rally-db\n          mountPath: /var/lib/rally\n        {{- range $key, $value := $envAll.Values.conf.rally_tests.templates }}\n        - name: blazar-etc\n          mountPath: {{ $value.name }}\n          subPath: {{ printf \"test_template_%d\" $key }}\n          readOnly: true\n        {{- end }}\n        - name: rally-work\n          mountPath: /home/rally/.rally\n{{ dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.reservation.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 8 }}\n{{ if $mounts_blazar_tests.volumeMounts }}{{ toYaml $mounts_blazar_tests.volumeMounts | indent 8 }}{{ end }}\n  
volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: blazar-etc\n      secret:\n        secretName: blazar-etc\n        defaultMode: 0444\n    - name: blazar-bin\n      configMap:\n        name: blazar-bin\n        defaultMode: 0555\n    - name: rally-db\n      emptyDir: {}\n    - name: rally-work\n      emptyDir: {}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.reservation.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{ if $mounts_blazar_tests.volumes }}{{ toYaml $mounts_blazar_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "blazar/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"blazar\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n...\n"
  },
  {
    "path": "blazar/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"blazar\" \"service\" \"test\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n...\n"
  },
  {
    "path": "blazar/templates/secret-ks-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $envAll := . -}}\n{{/* the endpoints.identity.auth sections with the oslo conf sections they get rendered to */}}\n{{- $ksUsers := dict\n  \"blazar\" \"keystone_authtoken\"\n-}}\n{{ dict\n  \"envAll\" $envAll\n  \"serviceName\" \"blazar\"\n  \"serviceUserSections\" $ksUsers\n  | include \"helm-toolkit.manifests.secret_ks_etc\"\n}}\n{{- end }}\n"
  },
  {
    "path": "blazar/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- $rabbitmqProtocol := \"http\" }}\n{{- if and $envAll.Values.manifests.certificates $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal }}\n{{- $rabbitmqProtocol = \"https\" }}\n{{- end }}\n{{- range $key1, $userClass := tuple \"admin\" \"blazar\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass $rabbitmqProtocol $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n...\n"
  },
  {
    "path": "blazar/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"reservation\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: b-api\n      port: {{ tuple \"reservation\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      {{- if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n      {{- end }}\n  selector:\n{{ tuple $envAll \"blazar\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{- if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{- if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{- end }}\n  {{- end }}\n{{- end }}\n...\n"
  },
  {
    "path": "blazar/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"reservation\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "blazar/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nrelease_group: null\n\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  manager:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    blazar_db_sync: quay.io/airshipit/blazar:2025.1-ubuntu_jammy\n    blazar_api: quay.io/airshipit/blazar:2025.1-ubuntu_jammy\n    blazar_manager: quay.io/airshipit/blazar:2025.1-ubuntu_jammy\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nnetwork:\n  api:\n    
ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30788\n  manager:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30789\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - blazar-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - blazar-db-sync\n        - blazar-ks-user\n        - blazar-ks-endpoints\n        - blazar-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n    manager:\n      jobs:\n        - blazar-db-sync\n        - blazar-ks-user\n        - blazar-ks-endpoints\n        - blazar-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n    bootstrap:\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: reservation\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - blazar-db-init\n      services:\n        - endpoint: 
internal\n          service: oslo_db\n    ks_endpoints:\n      jobs:\n        - blazar-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n    tests:\n      jobs:\n        - blazar-db-sync\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: reservation\n        - endpoint: internal\n          service: compute\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nsecrets:\n  identity:\n    admin: blazar-keystone-admin\n    blazar: blazar-keystone-user\n    service: blazar-keystone-service\n    test: blazar-keystone-test\n  oslo_db:\n    admin: blazar-db-admin\n    blazar: blazar-db-user\n  oslo_messaging:\n    admin: blazar-rabbitmq-admin\n    blazar: blazar-rabbitmq-user\n  tls:\n    reservation:\n      api:\n        public: blazar-tls-public\n        internal: blazar-tls-internal\n        nginx: blazar-tls-nginx\n        nginx_cluster: blazar-tls-nginx-cluster\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      blazar:\n        role: admin\n        
region_name: RegionOne\n        username: blazar\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      service:\n        role: admin,service\n        region_name: RegionOne\n        username: blazar_service_user\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      test:\n        role: admin\n        region_name: RegionOne\n        username: blazar-test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  reservation:\n    name: blazar\n    hosts:\n      default: blazar-api\n      public: blazar\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v1\n    scheme:\n      default: 'http'\n      service: 'http'\n    port:\n      api:\n        default: 1234\n        public: 80\n        service: 1234\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      blazar:\n        username: blazar\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /blazar\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n        secret:\n          tls:\n            internal: rabbitmq-tls-direct\n      blazar:\n        username: blazar\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    
host_fqdn_override:\n      default: null\n    path: /blazar\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  oslo_cache:\n    auth:\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n  compute:\n    name: nova\n    hosts:\n      default: nova-api\n      internal: nova-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: \"/v2.1\"\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 8774\n        public: 80\n  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress\n  # They are used to enable the Egress K8s network policy.\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns:\n        default: 53\n        protocol: UDP\n  ingress:\n    namespace: null\n    name: ingress\n    hosts:\n      default: ingress\n    port:\n      ingress:\n        default: 80\n\npod:\n  probes:\n    rpc_timeout: 60\n    rpc_retries: 2\n    api:\n      default:\n        liveness:\n          enabled: True\n          params:\n            initialDelaySeconds: 60\n            periodSeconds: 10\n            timeoutSeconds: 5\n        readiness:\n          enabled: True\n          params:\n            initialDelaySeconds: 60\n            periodSeconds: 10\n            timeoutSeconds: 5\n  security_context:\n    blazar:\n      pod:\n        runAsUser: 42424\n      container:\n        
blazar_api:\n          runAsUser: 0\n        blazar_manager:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    test:\n      pod:\n        runAsUser: 42424\n      container:\n        blazar_test_ks_user:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        blazar_test:\n          runAsUser: 65500\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    blazar:\n      enabled: false\n      tolerations:\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule\n  mounts:\n    blazar_api:\n      init_container: null\n      blazar_api:\n        volumeMounts:\n        volumes:\n    blazar_manager:\n      init_container: null\n      blazar_manager:\n        volumeMounts:\n        volumes:\n    blazar_bootstrap:\n      init_container: null\n      blazar_bootstrap:\n        volumeMounts:\n        volumes:\n    blazar_db_sync:\n      blazar_db_sync:\n        volumeMounts:\n        volumes:\n    blazar_tests:\n      init_container: null\n      blazar_tests:\n        volumeMounts:\n        volumes:\n  replicas:\n    api: 1\n    manager: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n      manager:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n      manager:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n 
       memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    manager:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\njobs:\n  bootstrap:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  
db_init:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  db_drop:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  db_sync:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  ks_endpoints:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  ks_service:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  ks_user:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  rabbit_init:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n\nconf:\n  blazar:\n    DEFAULT:\n      debug: false\n      log_config_append: /etc/blazar/logging.conf\n      api_paste_config: /etc/blazar/api-paste.ini\n      os_auth_protocol:\n      os_auth_host:\n      os_auth_port:\n      os_region_name:\n      os_admin_username:\n      os_admin_password:\n      os_admin_project_name:\n      os_admin_user_domain_name:\n      os_admin_project_domain_name:\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. 
when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    keystone_authtoken:\n      service_token_roles: service\n      service_token_roles_required: true\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n      service_type: reservation\n    oslo_messaging_notifications:\n      driver: messagingv2\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: true\n    oslo_concurrency:\n      lock_path: /var/lock\n    manager:\n      plugins: physical.host.plugin,virtual.instance.plugin,flavor.instance.plugin,virtual.floatingip.plugin\n    enforcement:\n      enabled_filters:\n        - MaxLeaseDurationFilter\n      max_lease_duration: 86400\n    physical_host_plugin:\n      aggregate_freepool_name: freepool\n      blazar_username: blazar\n      blazar_password: password\n      blazar_project_name: service\n      blazar_user_domain_name: service\n      blazar_project_domain_name: service\n      nova_client_timeout: 30\n      enable_host_reservation: true\n\n  logging:\n    loggers:\n      keys:\n        - root\n        - blazar\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: \"null\"\n    logger_blazar:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: blazar\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      
class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n  api_paste:\n    composite:reservation:\n      use: \"egg:Paste#urlmap\"\n      \"/\": blazarversions\n      \"/v1\": blazarapi_v1\n      \"/v2\": blazarapi_v2\n    composite:blazarapi_v1:\n      use: \"call:blazar.api.middleware:pipeline_factory\"\n      noauth: \"request_id faultwrap sizelimit noauth blazarapi_v1\"\n      keystone: \"request_id faultwrap sizelimit authtoken keystonecontext blazarapi_v1\"\n    composite:blazarapi_v2:\n      use: \"call:blazar.api.middleware:pipeline_factory\"\n      noauth: \"request_id faultwrap sizelimit noauth blazarapi_v2\"\n      keystone: \"request_id faultwrap sizelimit authtoken keystonecontext blazarapi_v2\"\n    app:blazarversions:\n      paste.app_factory: \"blazar.api.versions:Versions.factory\"\n    app:blazarapi_v1:\n      paste.app_factory: \"blazar.api.v1.app:make_app\"\n    app:blazarapi_v2:\n      paste.app_factory: \"blazar.api.v2.app:make_app\"\n    filter:request_id:\n      paste.filter_factory: \"oslo_middleware:RequestId.factory\"\n    filter:faultwrap:\n      paste.filter_factory: \"blazar.api.middleware:FaultWrapper.factory\"\n    filter:noauth:\n      paste.filter_factory: \"blazar.api.middleware:NoAuthMiddleware.factory\"\n    filter:sizelimit:\n      paste.filter_factory: \"oslo_middleware:RequestBodySizeLimiter.factory\"\n    filter:authtoken:\n      paste.filter_factory: \"keystonemiddleware.auth_token:filter_factory\"\n    filter:keystonecontext:\n      paste.filter_factory: 
\"blazar.api.middleware:KeystoneContextMiddleware.factory\"\n  policy: {}\n  rabbitmq:\n    policies:\n      - vhost: \"blazar\"\n        name: \"ha_ttl_blazar\"\n        pattern: '^(?!(amq\\.|reply_)).*'\n        definition:\n          ha-mode: \"all\"\n          ha-sync-mode: \"automatic\"\n          message-ttl: 70000\n        priority: 0\n        apply-to: all\n\n  rally_tests:\n    run_tempest: false\n    tests:\n      # NOTE:This is a dummy test added as a placeholder and currently, Rally does not support Blazar scenarios.\n      Dummy.dummy:\n        -\n          args:\n            sleep: 5\n          runner:\n            type: \"constant\"\n            times: 20\n            concurrency: 5\n          sla:\n            failure_rate:\n              max: 0\n    templates: []\n\nbootstrap:\n  enabled: false\n  ks_user: blazar\n  script: |\n    openstack token issue\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  deployment_api: true\n  deployment_manager: true\n  ingress_api: true\n  job_bootstrap: true\n  job_db_init: true\n  job_db_drop: false\n  job_db_sync: true\n  job_image_repo_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_rabbit_init: true\n  pdb_api: true\n  pdb_manager: true\n  pod_rally_test: true\n  secret_db: true\n  secret_keystone: true\n  secret_ks_etc: true\n  secret_rabbitmq: true\n  service_api: true\n  service_ingress_api: true\n\nnetwork_policy:\n  blazar:\n    ingress:\n      - {}\n    egress:\n      - {}\n\ntls:\n  identity: false\n  oslo_messaging: false\n  oslo_db: false\n  reservation:\n    api:\n      public: false\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - 
objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "ca-clusterissuer/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: \"1.0\"\ndescription: Certificate Issuer chart for OSH\nhome: https://cert-manager.io/\nname: ca-clusterissuer\nversion: 2025.2.0\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "ca-clusterissuer/templates/clusterissuer-ca.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.clusterissuer }}\n{{- $envAll := . }}\n---\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n  name: {{ .Values.conf.ca.issuer.name }}\n  labels:\n{{ tuple $envAll \"cert-manager\" \"clusterissuer\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  ca:\n    secretName: {{ .Values.conf.ca.secret.name }}\n...\n{{- end }}\n"
  },
  {
    "path": "ca-clusterissuer/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "ca-clusterissuer/templates/secret-ca.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ca }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ .Values.conf.ca.secret.name }}\n  namespace: {{ .Values.conf.ca.secret.namespace }}\ndata:\n  tls.crt: {{ .Values.conf.ca.secret.crt | default \"\" | b64enc }}\n  tls.key: {{ .Values.conf.ca.secret.key | default \"\" | b64enc }}\n...\n{{- end }}\n"
  },
  {
    "path": "ca-clusterissuer/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n---\nconf:\n  ca:\n    issuer:\n      name: ca-clusterissuer\n    secret:\n      name: secret-name\n      # Namespace where cert-manager is deployed.\n      namespace: cert-manager\n      crt: null\n      key: null\n\nmanifests:\n  clusterissuer: true\n  secret_ca: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "ca-issuer/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: \"1.0\"\ndescription: Certificate Issuer chart for OSH\nhome: https://cert-manager.io/\nname: ca-issuer\nversion: 2025.2.0\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "ca-issuer/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "ca-issuer/templates/issuer-ca.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.issuer }}\n{{- $envAll := . }}\n---\n{{- if semverCompare \"< v1.0.0\" .Values.cert_manager_version }}\napiVersion: cert-manager.io/v1alpha3\n{{- else }}\napiVersion: cert-manager.io/v1\n{{- end }}\nkind: Issuer\nmetadata:\n  name: {{ .Values.conf.ca.issuer.name }}\n  namespace: {{ .Release.Namespace }}\n  labels:\n{{ tuple $envAll \"cert-manager\" \"issuer\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  ca:\n    secretName: {{ .Values.conf.ca.secret.name }}\n...\n{{- end }}\n"
  },
  {
    "path": "ca-issuer/templates/secret-ca.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ca }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ .Values.conf.ca.secret.name }}\n  namespace: {{ .Release.Namespace }}\ndata:\n  tls.crt: {{ .Values.conf.ca.secret.crt | default \"\" | b64enc }}\n  tls.key: {{ .Values.conf.ca.secret.key | default \"\" | b64enc }}\n...\n{{- end }}\n"
  },
  {
    "path": "ca-issuer/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n---\nconf:\n  ca:\n    issuer:\n      name: ca-issuer\n    secret:\n      name: secret-name\n      crt: null\n      key: null\n\n# Default Version of jetstack/cert-manager being deployed.\n# Starting at v1.0.0, api-version: cert-manager.io/v1 is used\n# For previous apiVersion: cert-manager.io/v1alpha3, change to older version (such as v0.15.0)\ncert_manager_version: v1.0.0\n\nmanifests:\n  issuer: true\n  secret_ca: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "ceilometer/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Ceilometer\nname: ceilometer\nversion: 2025.2.0\nhome: https://docs.openstack.org/ceilometer/latest/\nsources:\n  - https://opendev.org/openstack/ceilometer\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "ceilometer/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "ceilometer/templates/bin/_ceilometer-central.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec ceilometer-polling \\\n  --polling-namespaces central \\\n  --config-file /etc/ceilometer/ceilometer.conf \\\n  --config-dir /etc/ceilometer/ceilometer.conf.d\n"
  },
  {
    "path": "ceilometer/templates/bin/_ceilometer-compute.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec ceilometer-polling \\\n  --polling-namespaces compute \\\n  --config-file /etc/ceilometer/ceilometer.conf \\\n  --config-dir /etc/ceilometer/ceilometer.conf.d\n"
  },
  {
    "path": "ceilometer/templates/bin/_ceilometer-ipmi.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec ceilometer-polling \\\n  --polling-namespaces ipmi \\\n  --config-file /etc/ceilometer/ceilometer.conf \\\n  --config-dir /etc/ceilometer/ceilometer.conf.d\n"
  },
  {
    "path": "ceilometer/templates/bin/_ceilometer-notification.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec ceilometer-agent-notification \\\n  --config-file /etc/ceilometer/ceilometer.conf \\\n  --config-dir /etc/ceilometer/ceilometer.conf.d\n"
  },
  {
    "path": "ceilometer/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec ceilometer-upgrade\n"
  },
  {
    "path": "ceilometer/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: ceilometer-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  ceilometer-central.sh: |\n{{ tuple \"bin/_ceilometer-central.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ceilometer-compute.sh: |\n{{ tuple \"bin/_ceilometer-compute.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ceilometer-notification.sh: |\n{{ tuple \"bin/_ceilometer-notification.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "ceilometer/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.ceilometer.cache.memcache_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.ceilometer.cache \"memcache_servers\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceilometer.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"ceilometer\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.ceilometer.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceilometer.oslo_messaging_notifications.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"ceilometer\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.ceilometer.oslo_messaging_notifications \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceilometer.notification.messaging_urls -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"ceilometer\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.ceilometer.notification \"messaging_urls\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceilometer.service_credentials.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ceilometer.service_credentials \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ceilometer.service_credentials.region_name -}}\n{{- $_ := set .Values.conf.ceilometer.service_credentials \"region_name\" .Values.endpoints.identity.auth.ceilometer.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ceilometer.service_credentials.project_name -}}\n{{- $_ := set .Values.conf.ceilometer.service_credentials \"project_name\" .Values.endpoints.identity.auth.ceilometer.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ceilometer.service_credentials.project_domain_name -}}\n{{- $_ := set .Values.conf.ceilometer.service_credentials \"project_domain_name\" .Values.endpoints.identity.auth.ceilometer.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ceilometer.service_credentials.user_domain_name -}}\n{{- $_ := set .Values.conf.ceilometer.service_credentials \"user_domain_name\" .Values.endpoints.identity.auth.ceilometer.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ceilometer.service_credentials.username -}}\n{{- $_ := set .Values.conf.ceilometer.service_credentials \"username\" .Values.endpoints.identity.auth.ceilometer.username -}}\n{{- end -}}\n{{- if empty .Values.conf.ceilometer.service_credentials.password -}}\n{{- $_ := set .Values.conf.ceilometer.service_credentials \"password\" .Values.endpoints.identity.auth.ceilometer.password -}}\n{{- end -}}\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: ceilometer-etc\ntype: Opaque\ndata:\n  rally_tests.yaml: {{ toYaml .Values.conf.rally_tests | b64enc }}\n  ceilometer.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.ceilometer | b64enc }}\n  event_pipeline.yaml: {{ toYaml .Values.conf.event_pipeline | b64enc }}\n  pipeline.yaml: {{ toYaml .Values.conf.pipeline | b64enc }}\n  event_definitions.yaml: {{ toYaml .Values.conf.event_definitions | b64enc }}\n  
gnocchi_resources.yaml: {{ toYaml .Values.conf.gnocchi_resources | b64enc }}\n  meters.yaml: {{ toYaml .Values.conf.meters | b64enc }}\n  polling.yaml: {{ toYaml .Values.conf.polling | b64enc }}\n{{- if .Values.conf.security }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.security \"key\" \"security.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end}}\n{{ include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.wsgi_ceilometer \"key\" \"wsgi-ceilometer.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "ceilometer/templates/daemonset-compute.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.daemonset_compute }}\n{{- $envAll := . }}\n\n{{- $mounts_ceilometer_compute := .Values.pod.mounts.ceilometer_compute.ceilometer_compute }}\n{{- $mounts_ceilometer_compute_init := .Values.pod.mounts.ceilometer_compute.init_container }}\n\n{{- $serviceAccountName := \"ceilometer-compute\" }}\n{{ tuple $envAll \"compute\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.ceilometer_compute }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: ceilometer-compute\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ceilometer\" \"compute\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ceilometer\" \"compute\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"compute\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceilometer\" \"compute\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"ceilometer_compute\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"ceilometer\" \"compute\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      hostNetwork: true\n      hostPID: true\n      dnsPolicy: ClusterFirstWithHostNet\n      nodeSelector:\n        {{ .Values.labels.compute.node_selector_key }}: {{ .Values.labels.compute.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.ceilometer.enabled }}\n{{ tuple $envAll \"ceilometer\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"compute\" $mounts_ceilometer_compute_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: ceilometer-compute\n{{ tuple $envAll \"ceilometer_compute\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.compute | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/ceilometer-compute.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.ceilometer.oslo_concurrency.lock_path }}\n            - name: pod-etc-ceilometer\n              mountPath: /etc/ceilometer\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/ceilometer.conf\n              subPath: ceilometer.conf\n              readOnly: true\n            - name: ceilometer-etc-snippets\n              mountPath: /etc/ceilometer/ceilometer.conf.d/\n              readOnly: true\n            - name: ceilometer-etc\n             
 mountPath: /etc/ceilometer/api_paste.ini\n              subPath: api_paste.ini\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/event_definitions.yaml\n              subPath: event_definitions.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/event_pipeline.yaml\n              subPath: event_pipeline.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/pipeline.yaml\n              subPath: pipeline.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/gnocchi_resources.yaml\n              subPath: gnocchi_resources.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/polling.yaml\n              subPath: polling.yaml\n              readOnly: true\n            - name: ceilometer-bin\n              mountPath: /tmp/ceilometer-compute.sh\n              subPath: ceilometer-compute.sh\n              readOnly: true\n            - name: varlibnova\n              mountPath: /var/lib/nova\n            - name: varliblibvirt\n              mountPath: /var/lib/libvirt\n            - name: run\n              mountPath: /run\n            - name: cgroup\n              mountPath: /sys/fs/cgroup\n            - name: machine-id\n              mountPath: /etc/machine-id\n              readOnly: true\n{{ if $mounts_ceilometer_compute.volumeMounts }}{{ toYaml $mounts_ceilometer_compute.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-ceilometer\n          emptyDir: {}\n        - name: ceilometer-etc\n       
   secret:\n            secretName: ceilometer-etc\n            defaultMode: 0444\n        - name: ceilometer-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: ceilometer-bin\n          configMap:\n            name: ceilometer-bin\n            defaultMode: 0555\n        - name: varlibnova\n          hostPath:\n            path: /var/lib/nova\n        - name: varliblibvirt\n          hostPath:\n            path: /var/lib/libvirt\n        - name: run\n          hostPath:\n            path: /run\n        - name: cgroup\n          hostPath:\n            path: /sys/fs/cgroup\n        - name: machine-id\n          hostPath:\n            path: /etc/machine-id\n{{ if $mounts_ceilometer_compute.volumes }}{{ toYaml $mounts_ceilometer_compute.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "ceilometer/templates/daemonset-ipmi.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.daemonset_ipmi }}\n{{- $envAll := . }}\n\n{{- $mounts_ceilometer_ipmi := .Values.pod.mounts.ceilometer_ipmi.ceilometer_ipmi }}\n{{- $mounts_ceilometer_ipmi_init := .Values.pod.mounts.ceilometer_ipmi.init_container }}\n\n{{- $serviceAccountName := \"ceilometer-ipmi\" }}\n{{ tuple $envAll \"ipmi\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.ceilometer_ipmi }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: ceilometer-ipmi\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ceilometer\" \"ipmi\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ceilometer\" \"ipmi\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"ipmi\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceilometer\" \"ipmi\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"ceilometer_ipmi\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"ceilometer\" \"ipmi\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      hostNetwork: true\n      hostPID: true\n      dnsPolicy: ClusterFirstWithHostNet\n      nodeSelector:\n        {{ .Values.labels.ipmi.node_selector_key }}: {{ .Values.labels.ipmi.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.ceilometer.enabled }}\n{{ tuple $envAll \"ceilometer\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"ipmi\" $mounts_ceilometer_ipmi_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: ceilometer-ipmi\n{{ tuple $envAll \"ceilometer_ipmi\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.ipmi | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            privileged: true\n          command:\n            - /tmp/ceilometer-ipmi.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.ceilometer.oslo_concurrency.lock_path }}\n            - name: pod-etc-ceilometer\n              mountPath: /etc/ceilometer\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/ceilometer.conf\n              subPath: ceilometer.conf\n              readOnly: true\n            - name: ceilometer-etc-snippets\n              mountPath: /etc/ceilometer/ceilometer.conf.d/\n              readOnly: true\n            - name: 
ceilometer-etc\n              mountPath: /etc/ceilometer/api_paste.ini\n              subPath: api_paste.ini\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/event_definitions.yaml\n              subPath: event_definitions.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/event_pipeline.yaml\n              subPath: event_pipeline.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/pipeline.yaml\n              subPath: pipeline.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/gnocchi_resources.yaml\n              subPath: gnocchi_resources.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/polling.yaml\n              subPath: polling.yaml\n              readOnly: true\n            - name: ceilometer-bin\n              mountPath: /tmp/ceilometer-ipmi.sh\n              subPath: ceilometer-ipmi.sh\n              readOnly: true\n            - name: ipmi-device\n              mountPath: {{ .Values.ipmi_device }}\n              readOnly: true\n{{ if $mounts_ceilometer_ipmi.volumeMounts }}{{ toYaml $mounts_ceilometer_ipmi.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-ceilometer\n          emptyDir: {}\n        - name: ceilometer-etc\n          secret:\n            secretName: ceilometer-etc\n            defaultMode: 0444\n        - name: ceilometer-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n     
     emptyDir: {}\n{{ end }}\n        - name: ceilometer-bin\n          configMap:\n            name: ceilometer-bin\n            defaultMode: 0555\n        - name: ipmi-device\n          hostPath:\n            path: {{ .Values.ipmi_device }}\n{{ if $mounts_ceilometer_ipmi.volumes }}{{ toYaml $mounts_ceilometer_ipmi.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "ceilometer/templates/deployment-central.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_central }}\n{{- $envAll := . }}\n\n{{- $mounts_ceilometer_central := .Values.pod.mounts.ceilometer_central.ceilometer_central }}\n{{- $mounts_ceilometer_central_init := .Values.pod.mounts.ceilometer_central.init_container }}\n\n{{- $serviceAccountName := \"ceilometer-central\" }}\n{{ tuple $envAll \"central\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.ceilometer_central }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: ceilometer-central\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ceilometer\" \"central\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.central }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ceilometer\" \"central\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceilometer\" \"central\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple 
\"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"ceilometer_central\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"ceilometer\" \"central\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.central.node_selector_key }}: {{ .Values.labels.central.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.ceilometer.enabled }}\n{{ tuple $envAll \"ceilometer\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"central\" $mounts_ceilometer_central_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: ceilometer-central\n{{ tuple $envAll \"ceilometer_central\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.central | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/ceilometer-central.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.ceilometer.oslo_concurrency.lock_path }}\n            - name: pod-etc-ceilometer\n              mountPath: /etc/ceilometer\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/ceilometer.conf\n              subPath: ceilometer.conf\n              readOnly: true\n            - name: ceilometer-etc-snippets\n              mountPath: /etc/ceilometer/ceilometer.conf.d/\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/api_paste.ini\n              
subPath: api_paste.ini\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/event_definitions.yaml\n              subPath: event_definitions.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/event_pipeline.yaml\n              subPath: event_pipeline.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/pipeline.yaml\n              subPath: pipeline.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/gnocchi_resources.yaml\n              subPath: gnocchi_resources.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/polling.yaml\n              subPath: polling.yaml\n              readOnly: true\n            - name: ceilometer-bin\n              mountPath: /tmp/ceilometer-central.sh\n              subPath: ceilometer-central.sh\n              readOnly: true\n{{ if $mounts_ceilometer_central.volumeMounts }}{{ toYaml $mounts_ceilometer_central.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-ceilometer\n          emptyDir: {}\n        - name: ceilometer-etc\n          secret:\n            secretName: ceilometer-etc\n            defaultMode: 0444\n        - name: ceilometer-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: ceilometer-bin\n          configMap:\n            name: ceilometer-bin\n            defaultMode: 0555\n{{ if 
$mounts_ceilometer_central.volumes }}{{ toYaml $mounts_ceilometer_central.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "ceilometer/templates/deployment-notification.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_notification }}\n{{- $envAll := . }}\n\n{{- $mounts_ceilometer_notification := .Values.pod.mounts.ceilometer_notification.ceilometer_notification }}\n{{- $mounts_ceilometer_notification_init := .Values.pod.mounts.ceilometer_notification.init_container }}\n\n{{- $serviceAccountName := \"ceilometer-notification\" }}\n{{ tuple $envAll \"notification\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.ceilometer_notification }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: ceilometer-notification\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ceilometer\" \"notification\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.notification }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ceilometer\" \"notification\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceilometer\" \"notification\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include 
\"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"ceilometer_notification\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"ceilometer\" \"notification\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.notification.node_selector_key }}: {{ .Values.labels.notification.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.ceilometer.enabled }}\n{{ tuple $envAll \"ceilometer\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"notification\" $mounts_ceilometer_notification_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: ceilometer-notification\n{{ tuple $envAll \"ceilometer_notification\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.notification | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/ceilometer-notification.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.ceilometer.oslo_concurrency.lock_path }}\n            - name: pod-etc-ceilometer\n              mountPath: /etc/ceilometer\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/ceilometer.conf\n              subPath: ceilometer.conf\n              readOnly: true\n            - name: ceilometer-etc-snippets\n              mountPath: /etc/ceilometer/ceilometer.conf.d/\n 
             readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/api_paste.ini\n              subPath: api_paste.ini\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/event_definitions.yaml\n              subPath: event_definitions.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/event_pipeline.yaml\n              subPath: event_pipeline.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/pipeline.yaml\n              subPath: pipeline.yaml\n              readOnly: true\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/gnocchi_resources.yaml\n              subPath: gnocchi_resources.yaml\n              readOnly: true\n            - name: etc-ceilometer-meters\n              mountPath: /etc/ceilometer/meters.d\n            - name: ceilometer-etc\n              mountPath: /etc/ceilometer/meters.d/meters.yaml\n              subPath: meters.yaml\n              readOnly: true\n            - name: ceilometer-bin\n              mountPath: /tmp/ceilometer-notification.sh\n              subPath: ceilometer-notification.sh\n              readOnly: true\n{{ if $mounts_ceilometer_notification.volumeMounts }}{{ toYaml $mounts_ceilometer_notification.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-ceilometer\n          emptyDir: {}\n        - name: etc-ceilometer-meters\n          emptyDir: {}\n        - name: ceilometer-etc\n          secret:\n            secretName: ceilometer-etc\n            defaultMode: 0444\n        - name: 
ceilometer-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: ceilometer-bin\n          configMap:\n            name: ceilometer-bin\n            defaultMode: 0555\n{{ if $mounts_ceilometer_notification.volumes }}{{ toYaml $mounts_ceilometer_notification.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "ceilometer/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "ceilometer/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"ceilometer\" \"keystoneUser\" .Values.bootstrap.ks_user -}}\n{{- if .Values.pod.tolerations.ceilometer.enabled -}}\n{{- $_ := set $bootstrapJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "ceilometer/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"ceilometer\" \"podVolMounts\" .Values.pod.mounts.ceilometer_db_sync.ceilometer_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.ceilometer_db_sync.ceilometer_db_sync.volumes -}}\n{{- if .Values.pod.tolerations.ceilometer.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "ceilometer/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"ceilometer\" -}}\n{{- if .Values.pod.tolerations.ceilometer.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "ceilometer/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"ceilometer\" -}}\n{{- if .Values.pod.tolerations.ceilometer.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "ceilometer/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"ceilometer\" -}}\n{{- if .Values.pod.tolerations.ceilometer.enabled -}}\n{{- $_ := set $rmqUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "ceilometer/templates/network_policy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"ceilometer\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "ceilometer/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"ceilometer\" \"test\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ceilometer/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"ceilometer\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass \"http\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ceilometer/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "ceilometer/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for ceilometer.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nlabels:\n  compute:\n    node_selector_key: openstack-compute-node\n    node_selector_value: enabled\n  central:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  ipmi:\n    node_selector_key: openstack-node\n    node_selector_value: enabled\n  notification:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ceilometer_central: quay.io/airshipit/ceilometer:2025.1-ubuntu_noble\n    ceilometer_compute: quay.io/airshipit/ceilometer:2025.1-ubuntu_noble\n    ceilometer_ipmi: quay.io/airshipit/ceilometer:2025.1-ubuntu_noble\n    ceilometer_notification: quay.io/airshipit/ceilometer:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - 
image_repo_sync\n\nipmi_device: /dev/ipmi0\n\nconf:\n  ceilometer:\n    DEFAULT:\n      transport_url: null\n    service_credentials:\n      auth_type: password\n      interface: internal\n    notification:\n      messaging_urls:\n        type: multistring\n        values:\n          - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/ceilometer\n          - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/cinder\n          - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/glance\n          - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/nova\n          - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/keystone\n          - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/neutron\n          - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/heat\n    oslo_messaging_notifications:\n      driver: messagingv2\n      topics:\n        - notifications\n        - profiler\n    oslo_concurrency:\n      lock_path: /var/lock\n    cache:\n      enabled: true\n      backend: dogpile.cache.memcached\n      expiration_time: 86400\n  event_definitions:\n    - event_type: 'compute.instance.*'\n      traits: &instance_traits\n        tenant_id:\n          fields: payload.tenant_id\n        user_id:\n          fields: payload.user_id\n        instance_id:\n          fields: payload.instance_id\n        resource_id:\n          fields: payload.instance_id\n        host:\n          fields: publisher_id.`split(., 1, 1)`\n        service:\n          fields: publisher_id.`split(., 0, -1)`\n        memory_mb:\n          type: int\n          fields: payload.memory_mb\n        disk_gb:\n          type: int\n          fields: payload.disk_gb\n        root_gb:\n          type: int\n          fields: payload.root_gb\n        ephemeral_gb:\n          type: int\n          fields: payload.ephemeral_gb\n        vcpus:\n          type: int\n          fields: 
payload.vcpus\n        instance_type_id:\n          type: int\n          fields: payload.instance_type_id\n        instance_type:\n          fields: payload.instance_type\n        state:\n          fields: payload.state\n        os_architecture:\n          fields: payload.image_meta.'org.openstack__1__architecture'\n        os_version:\n          fields: payload.image_meta.'org.openstack__1__os_version'\n        os_distro:\n          fields: payload.image_meta.'org.openstack__1__os_distro'\n        launched_at:\n          type: datetime\n          fields: payload.launched_at\n        deleted_at:\n          type: datetime\n          fields: payload.deleted_at\n    - event_type: compute.instance.update\n      traits:\n        <<: *instance_traits\n        old_state:\n          fields: payload.old_state\n    - event_type: compute.instance.exists\n      traits:\n        <<: *instance_traits\n        audit_period_beginning:\n          type: datetime\n          fields: payload.audit_period_beginning\n        audit_period_ending:\n          type: datetime\n          fields: payload.audit_period_ending\n    - event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*']\n      traits: &cinder_traits\n        user_id:\n          fields: payload.user_id\n        project_id:\n          fields: payload.tenant_id\n        availability_zone:\n          fields: payload.availability_zone\n        display_name:\n          fields: payload.display_name\n        replication_status:\n          fields: payload.replication_status\n        status:\n          fields: payload.status\n        created_at:\n          fields: payload.created_at\n    - event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*']\n      traits:\n        <<: 
*cinder_traits\n        resource_id:\n          fields: payload.volume_id\n        host:\n          fields: payload.host\n        size:\n          fields: payload.size\n        type:\n          fields: payload.volume_type\n        replication_status:\n          fields: payload.replication_status\n    - event_type: ['snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*']\n      traits:\n        <<: *cinder_traits\n        resource_id:\n          fields: payload.snapshot_id\n        volume_id:\n          fields: payload.volume_id\n    - event_type: ['image_volume_cache.*']\n      traits:\n        image_id:\n          fields: payload.image_id\n        host:\n          fields: payload.host\n    - event_type: ['image.create', 'image.update', 'image.upload', 'image.delete']\n      traits: &glance_crud\n        project_id:\n          fields: payload.owner\n        resource_id:\n          fields: payload.id\n        name:\n          fields: payload.name\n        status:\n          fields: payload.status\n        created_at:\n          fields: payload.created_at\n        user_id:\n          fields: payload.owner\n        deleted_at:\n          fields: payload.deleted_at\n        size:\n          fields: payload.size\n    - event_type: image.send\n      traits: &glance_send\n        receiver_project:\n          fields: payload.receiver_tenant_id\n        receiver_user:\n          fields: payload.receiver_user_id\n        user_id:\n          fields: payload.owner_id\n        image_id:\n          fields: payload.image_id\n        destination_ip:\n          fields: payload.destination_ip\n        bytes_sent:\n          type: int\n          fields: payload.bytes_sent\n    - event_type: orchestration.stack.*\n      traits: &orchestration_crud\n        project_id:\n          fields: payload.tenant_id\n        user_id:\n          fields: ['_context_trustor_user_id', '_context_user_id']\n        resource_id:\n          fields: payload.stack_identity\n    - 
event_type: sahara.cluster.*\n      traits: &sahara_crud\n        project_id:\n          fields: payload.project_id\n        user_id:\n          fields: _context_user_id\n        resource_id:\n          fields: payload.cluster_id\n    - event_type: sahara.cluster.health\n      traits: &sahara_health\n        <<: *sahara_crud\n        verification_id:\n          fields: payload.verification_id\n        health_check_status:\n          fields: payload.health_check_status\n        health_check_name:\n          fields: payload.health_check_name\n        health_check_description:\n          fields: payload.health_check_description\n        created_at:\n          type: datetime\n          fields: payload.created_at\n        updated_at:\n          type: datetime\n          fields: payload.updated_at\n    - event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*',\n                   'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*']\n      traits: &identity_crud\n        resource_id:\n          fields: payload.resource_info\n        initiator_id:\n          fields: payload.initiator.id\n        project_id:\n          fields: payload.initiator.project_id\n        domain_id:\n          fields: payload.initiator.domain_id\n    - event_type: identity.role_assignment.*\n      traits: &identity_role_assignment\n        role:\n          fields: payload.role\n        group:\n          fields: payload.group\n        domain:\n          fields: payload.domain\n        user:\n          fields: payload.user\n        project:\n          fields: payload.project\n    - event_type: identity.authenticate\n      traits: &identity_authenticate\n        typeURI:\n          fields: payload.typeURI\n        id:\n          fields: payload.id\n        action:\n          fields: payload.action\n        eventType:\n          fields: payload.eventType\n        eventTime:\n          fields: 
payload.eventTime\n        outcome:\n          fields: payload.outcome\n        initiator_typeURI:\n          fields: payload.initiator.typeURI\n        initiator_id:\n          fields: payload.initiator.id\n        initiator_name:\n          fields: payload.initiator.name\n        initiator_host_agent:\n          fields: payload.initiator.host.agent\n        initiator_host_addr:\n          fields: payload.initiator.host.address\n        target_typeURI:\n          fields: payload.target.typeURI\n        target_id:\n          fields: payload.target.id\n        observer_typeURI:\n          fields: payload.observer.typeURI\n        observer_id:\n          fields: payload.observer.id\n    - event_type: objectstore.http.request\n      traits: &objectstore_request\n        typeURI:\n          fields: payload.typeURI\n        id:\n          fields: payload.id\n        action:\n          fields: payload.action\n        eventType:\n          fields: payload.eventType\n        eventTime:\n          fields: payload.eventTime\n        outcome:\n          fields: payload.outcome\n        initiator_typeURI:\n          fields: payload.initiator.typeURI\n        initiator_id:\n          fields: payload.initiator.id\n        initiator_project_id:\n          fields: payload.initiator.project_id\n        target_typeURI:\n          fields: payload.target.typeURI\n        target_id:\n          fields: payload.target.id\n        target_action:\n          fields: payload.target.action\n        target_metadata_path:\n          fields: payload.target.metadata.path\n        target_metadata_version:\n          fields: payload.target.metadata.version\n        target_metadata_container:\n          fields: payload.target.metadata.container\n        target_metadata_object:\n          fields: payload.target.metadata.object\n        observer_id:\n          fields: payload.observer.id\n    - event_type: ['network.*', 'subnet.*', 'port.*', 'router.*', 'floatingip.*', 'pool.*', 'vip.*', 'member.*', 
'health_monitor.*', 'healthmonitor.*', 'listener.*', 'loadbalancer.*', 'firewall.*', 'firewall_policy.*', 'firewall_rule.*', 'vpnservice.*', 'ipsecpolicy.*', 'ikepolicy.*', 'ipsec_site_connection.*']\n      traits: &network_traits\n        user_id:\n          fields: _context_user_id\n        project_id:\n          fields: _context_tenant_id\n    - event_type: network.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.network.id', 'payload.id']\n    - event_type: subnet.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.subnet.id', 'payload.id']\n    - event_type: port.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.port.id', 'payload.id']\n    - event_type: router.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.router.id', 'payload.id']\n    - event_type: floatingip.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.floatingip.id', 'payload.id']\n    - event_type: pool.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.pool.id', 'payload.id']\n    - event_type: vip.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.vip.id', 'payload.id']\n    - event_type: member.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.member.id', 'payload.id']\n    - event_type: health_monitor.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.health_monitor.id', 'payload.id']\n    - event_type: healthmonitor.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.healthmonitor.id', 'payload.id']\n    - event_type: listener.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.listener.id', 
'payload.id']\n    - event_type: loadbalancer.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.loadbalancer.id', 'payload.id']\n    - event_type: firewall.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.firewall.id', 'payload.id']\n    - event_type: firewall_policy.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.firewall_policy.id', 'payload.id']\n    - event_type: firewall_rule.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.firewall_rule.id', 'payload.id']\n    - event_type: vpnservice.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.vpnservice.id', 'payload.id']\n    - event_type: ipsecpolicy.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.ipsecpolicy.id', 'payload.id']\n    - event_type: ikepolicy.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.ikepolicy.id', 'payload.id']\n    - event_type: ipsec_site_connection.*\n      traits:\n        <<: *network_traits\n        resource_id:\n          fields: ['payload.ipsec_site_connection.id', 'payload.id']\n    - event_type: '*http.*'\n      traits: &http_audit\n        project_id:\n          fields: payload.initiator.project_id\n        user_id:\n          fields: payload.initiator.id\n        typeURI:\n          fields: payload.typeURI\n        eventType:\n          fields: payload.eventType\n        action:\n          fields: payload.action\n        outcome:\n          fields: payload.outcome\n        id:\n          fields: payload.id\n        eventTime:\n          fields: payload.eventTime\n        requestPath:\n          fields: payload.requestPath\n        observer_id:\n          fields: payload.observer.id\n        target_id:\n          fields: payload.target.id\n        
target_typeURI:\n          fields: payload.target.typeURI\n        target_name:\n          fields: payload.target.name\n        initiator_typeURI:\n          fields: payload.initiator.typeURI\n        initiator_id:\n          fields: payload.initiator.id\n        initiator_name:\n          fields: payload.initiator.name\n        initiator_host_address:\n          fields: payload.initiator.host.address\n    - event_type: '*http.response'\n      traits:\n        <<: *http_audit\n        reason_code:\n          fields: payload.reason.reasonCode\n    - event_type: ['dns.domain.create', 'dns.domain.update', 'dns.domain.delete']\n      traits: &dns_domain_traits\n        status:\n          fields: payload.status\n        retry:\n          fields: payload.retry\n        description:\n          fields: payload.description\n        expire:\n          fields: payload.expire\n        email:\n          fields: payload.email\n        ttl:\n          fields: payload.ttl\n        action:\n          fields: payload.action\n        name:\n          fields: payload.name\n        resource_id:\n          fields: payload.id\n        created_at:\n          fields: payload.created_at\n        updated_at:\n          fields: payload.updated_at\n        version:\n          fields: payload.version\n        parent_domain_id:\n          fields: parent_domain_id\n        serial:\n          fields: payload.serial\n    - event_type: dns.domain.exists\n      traits:\n        <<: *dns_domain_traits\n        audit_period_beginning:\n          type: datetime\n          fields: payload.audit_period_beginning\n        audit_period_ending:\n          type: datetime\n          fields: payload.audit_period_ending\n    - event_type: trove.*\n      traits: &trove_base_traits\n        instance_type:\n          fields: payload.instance_type\n        user_id:\n          fields: payload.user_id\n        resource_id:\n          fields: payload.instance_id\n        instance_type_id:\n          fields: 
payload.instance_type_id\n        launched_at:\n          type: datetime\n          fields: payload.launched_at\n        instance_name:\n          fields: payload.instance_name\n        state:\n          fields: payload.state\n        nova_instance_id:\n          fields: payload.nova_instance_id\n        service_id:\n          fields: payload.service_id\n        created_at:\n          type: datetime\n          fields: payload.created_at\n        region:\n          fields: payload.region\n    - event_type: ['trove.instance.create', 'trove.instance.modify_volume', 'trove.instance.modify_flavor', 'trove.instance.delete']\n      traits: &trove_common_traits\n        name:\n          fields: payload.name\n        availability_zone:\n          fields: payload.availability_zone\n        instance_size:\n          type: int\n          fields: payload.instance_size\n        volume_size:\n          type: int\n          fields: payload.volume_size\n        nova_volume_id:\n          fields: payload.nova_volume_id\n    - event_type: trove.instance.create\n      traits:\n        <<: [*trove_base_traits, *trove_common_traits]\n    - event_type: trove.instance.modify_volume\n      traits:\n        <<: [*trove_base_traits, *trove_common_traits]\n        old_volume_size:\n          type: int\n          fields: payload.old_volume_size\n        modify_at:\n          type: datetime\n          fields: payload.modify_at\n    - event_type: trove.instance.modify_flavor\n      traits:\n        <<: [*trove_base_traits, *trove_common_traits]\n        old_instance_size:\n          type: int\n          fields: payload.old_instance_size\n        modify_at:\n          type: datetime\n          fields: payload.modify_at\n    - event_type: trove.instance.delete\n      traits:\n        <<: [*trove_base_traits, *trove_common_traits]\n        deleted_at:\n          type: datetime\n          fields: payload.deleted_at\n    - event_type: trove.instance.exists\n      traits:\n        <<: 
*trove_base_traits\n        display_name:\n          fields: payload.display_name\n        audit_period_beginning:\n          type: datetime\n          fields: payload.audit_period_beginning\n        audit_period_ending:\n          type: datetime\n          fields: payload.audit_period_ending\n    - event_type: profiler.*\n      traits:\n        project:\n          fields: payload.project\n        service:\n          fields: payload.service\n        name:\n          fields: payload.name\n        base_id:\n          fields: payload.base_id\n        trace_id:\n          fields: payload.trace_id\n        parent_id:\n          fields: payload.parent_id\n        timestamp:\n          fields: payload.timestamp\n        host:\n          fields: payload.info.host\n        path:\n          fields: payload.info.request.path\n        query:\n          fields: payload.info.request.query\n        method:\n          fields: payload.info.request.method\n        scheme:\n          fields: payload.info.request.scheme\n        db.statement:\n          fields: payload.info.db.statement\n        db.params:\n          fields: payload.info.db.params\n    - event_type: 'magnum.bay.*'\n      traits: &magnum_bay_crud\n        id:\n          fields: payload.id\n        typeURI:\n          fields: payload.typeURI\n        eventType:\n          fields: payload.eventType\n        eventTime:\n          fields: payload.eventTime\n        action:\n          fields: payload.action\n        outcome:\n          fields: payload.outcome\n        initiator_id:\n          fields: payload.initiator.id\n        initiator_typeURI:\n          fields: payload.initiator.typeURI\n        initiator_name:\n          fields: payload.initiator.name\n        initiator_host_agent:\n          fields: payload.initiator.host.agent\n        initiator_host_address:\n          fields: payload.initiator.host.address\n        target_id:\n          fields: payload.target.id\n        target_typeURI:\n          fields: 
payload.target.typeURI\n        observer_id:\n          fields: payload.observer.id\n        observer_typeURI:\n          fields: payload.observer.typeURI\n  event_pipeline:\n    sinks:\n      - name: event_sink\n        publishers:\n          - notifier://\n          - gnocchi://\n    sources:\n      - events:\n          - '*'\n        name: event_source\n        sinks:\n          - event_sink\n  gnocchi_resources:\n    archive_policy_default: ceilometer-low\n    archive_policies:\n      - name: ceilometer-low\n        aggregation_methods:\n          - mean\n        back_window: 0\n        definition:\n          - granularity: 5 minutes\n            timespan: 30 days\n    resources:\n      - metrics:\n          - identity.authenticate.success\n          - identity.authenticate.pending\n          - identity.authenticate.failure\n          - identity.user.created\n          - identity.user.deleted\n          - identity.user.updated\n          - identity.group.created\n          - identity.group.deleted\n          - identity.group.updated\n          - identity.role.created\n          - identity.role.deleted\n          - identity.role.updated\n          - identity.project.created\n          - identity.project.deleted\n          - identity.project.updated\n          - identity.trust.created\n          - identity.trust.deleted\n          - identity.role_assignment.created\n          - identity.role_assignment.deleted\n        resource_type: identity\n      - metrics:\n          - radosgw.objects\n          - radosgw.objects.size\n          - radosgw.objects.containers\n          - radosgw.api.request\n          - radosgw.containers.objects\n          - radosgw.containers.objects.size\n        resource_type: ceph_account\n      - attributes:\n          display_name: resource_metadata.display_name\n          flavor_id: resource_metadata.(instance_flavor_id|(flavor.id))\n          host: resource_metadata.(instance_host|host)\n          image_ref: 
resource_metadata.image_ref\n          server_group: resource_metadata.user_metadata.server_group\n        event_associated_resources:\n          instance_disk: '{\"=\": {\"instance_id\": \"%s\"}}'\n          instance_network_interface: '{\"=\": {\"instance_id\": \"%s\"}}'\n        event_attributes:\n          id: instance_id\n        event_delete: compute.instance.delete.start\n        metrics:\n          - memory\n          - memory.usage\n          - memory.resident\n          - vcpus\n          - cpu\n          - cpu.delta\n          - cpu_util\n          - disk.root.size\n          - disk.ephemeral.size\n          - disk.read.requests\n          - disk.read.requests.rate\n          - disk.write.requests\n          - disk.write.requests.rate\n          - disk.read.bytes\n          - disk.read.bytes.rate\n          - disk.write.bytes\n          - disk.write.bytes.rate\n          - disk.latency\n          - disk.iops\n          - disk.capacity\n          - disk.allocation\n          - disk.usage\n          - compute.instance.booting.time\n          - perf.cpu.cycles\n          - perf.instructions\n          - perf.cache.references\n          - perf.cache.misses\n        resource_type: instance\n      - attributes:\n          instance_id: resource_metadata.instance_id\n          name: resource_metadata.vnic_name\n        metrics:\n          - network.outgoing.packets.rate\n          - network.incoming.packets.rate\n          - network.outgoing.packets\n          - network.outgoing.packets.drop\n          - network.incoming.packets.drop\n          - network.outgoing.packets.error\n          - network.incoming.packets.error\n          - network.outgoing.bytes.rate\n          - network.incoming.bytes.rate\n          - network.outgoing.bytes\n          - network.incoming.bytes\n        resource_type: instance_network_interface\n      - attributes:\n          instance_id: resource_metadata.instance_id\n          name: resource_metadata.disk_name\n        metrics:\n     
     - disk.device.read.requests\n          - disk.device.read.requests.rate\n          - disk.device.write.requests\n          - disk.device.write.requests.rate\n          - disk.device.read.bytes\n          - disk.device.read.bytes.rate\n          - disk.device.write.bytes\n          - disk.device.write.bytes.rate\n          - disk.device.latency\n          - disk.device.iops\n          - disk.device.capacity\n          - disk.device.allocation\n          - disk.device.usage\n        resource_type: instance_disk\n      - attributes:\n          container_format: resource_metadata.container_format\n          disk_format: resource_metadata.disk_format\n          name: resource_metadata.name\n        event_attributes:\n          id: resource_id\n        event_delete: image.delete\n        metrics:\n          - image.size\n          - image.download\n          - image.serve\n        resource_type: image\n      - metrics:\n          - hardware.ipmi.node.power\n          - hardware.ipmi.node.temperature\n          - hardware.ipmi.node.inlet_temperature\n          - hardware.ipmi.node.outlet_temperature\n          - hardware.ipmi.node.fan\n          - hardware.ipmi.node.current\n          - hardware.ipmi.node.voltage\n          - hardware.ipmi.node.airflow\n          - hardware.ipmi.node.cups\n          - hardware.ipmi.node.cpu_util\n          - hardware.ipmi.node.mem_util\n          - hardware.ipmi.node.io_util\n        resource_type: ipmi\n      - event_delete: floatingip.delete.end\n        event_attributes:\n          id: resource_id\n        metrics:\n          - bandwidth\n          - network\n          - network.create\n          - network.update\n          - subnet\n          - subnet.create\n          - subnet.update\n          - port\n          - port.create\n          - port.update\n          - router\n          - router.create\n          - router.update\n          - ip.floating\n          - ip.floating.create\n          - ip.floating.update\n        
resource_type: network\n      - metrics:\n          - stack.create\n          - stack.update\n          - stack.delete\n          - stack.resume\n          - stack.suspend\n        resource_type: stack\n      - metrics:\n          - storage.objects.incoming.bytes\n          - storage.objects.outgoing.bytes\n          - storage.api.request\n          - storage.objects.size\n          - storage.objects\n          - storage.objects.containers\n          - storage.containers.objects\n          - storage.containers.objects.size\n        resource_type: swift_account\n      - attributes:\n          display_name: resource_metadata.display_name\n          volume_type: resource_metadata.volume_type\n        event_delete: volume.delete.start\n        event_attributes:\n          id: resource_id\n        metrics:\n          - volume\n          - volume.size\n          - snapshot.size\n          - volume.snapshot.size\n          - volume.backup.size\n        resource_type: volume\n      - attributes:\n          host_name: resource_metadata.resource_url\n        metrics:\n          - hardware.cpu.load.1min\n          - hardware.cpu.load.5min\n          - hardware.cpu.load.15min\n          - hardware.cpu.util\n          - hardware.memory.total\n          - hardware.memory.used\n          - hardware.memory.swap.total\n          - hardware.memory.swap.avail\n          - hardware.memory.buffer\n          - hardware.memory.cached\n          - hardware.network.ip.outgoing.datagrams\n          - hardware.network.ip.incoming.datagrams\n          - hardware.system_stats.cpu.idle\n          - hardware.system_stats.io.outgoing.blocks\n          - hardware.system_stats.io.incoming.blocks\n        resource_type: host\n      - attributes:\n          device_name: resource_metadata.device\n          host_name: resource_metadata.resource_url\n        metrics:\n          - hardware.disk.size.total\n          - hardware.disk.size.used\n        resource_type: host_disk\n      - attributes:\n        
  device_name: resource_metadata.name\n          host_name: resource_metadata.resource_url\n        metrics:\n          - hardware.network.incoming.bytes\n          - hardware.network.outgoing.bytes\n          - hardware.network.outgoing.errors\n        resource_type: host_network_interface\n  meters:\n    metric:\n      - name: \"image.size\"\n        event_type:\n          - \"image.upload\"\n          - \"image.delete\"\n          - \"image.update\"\n        type: \"gauge\"\n        unit: B\n        volume: $.payload.size\n        resource_id: $.payload.id\n        project_id: $.payload.owner\n      - name: \"image.download\"\n        event_type: \"image.send\"\n        type: \"delta\"\n        unit: \"B\"\n        volume: $.payload.bytes_sent\n        resource_id: $.payload.image_id\n        user_id: $.payload.receiver_user_id\n        project_id: $.payload.receiver_tenant_id\n      - name: \"image.serve\"\n        event_type: \"image.send\"\n        type: \"delta\"\n        unit: \"B\"\n        volume: $.payload.bytes_sent\n        resource_id: $.payload.image_id\n        project_id: $.payload.owner_id\n      - name: 'volume.size'\n        event_type:\n          - 'volume.exists'\n          - 'volume.create.*'\n          - 'volume.delete.*'\n          - 'volume.resize.*'\n          - 'volume.attach.*'\n          - 'volume.detach.*'\n          - 'volume.update.*'\n        type: 'gauge'\n        unit: 'GB'\n        volume: $.payload.size\n        user_id: $.payload.user_id\n        project_id: $.payload.tenant_id\n        resource_id: $.payload.volume_id\n        metadata:\n          display_name: $.payload.display_name\n          volume_type: $.payload.volume_type\n      - name: 'snapshot.size'\n        event_type:\n          - 'snapshot.exists'\n          - 'snapshot.create.*'\n          - 'snapshot.delete.*'\n        type: 'gauge'\n        unit: 'GB'\n        volume: $.payload.volume_size\n        user_id: $.payload.user_id\n        project_id: 
$.payload.tenant_id\n        resource_id: $.payload.snapshot_id\n        metadata:\n          display_name: $.payload.display_name\n      - name: 'backup.size'\n        event_type:\n          - 'backup.exists'\n          - 'backup.create.*'\n          - 'backup.delete.*'\n          - 'backup.restore.*'\n        type: 'gauge'\n        unit: 'GB'\n        volume: $.payload.size\n        user_id: $.payload.user_id\n        project_id: $.payload.tenant_id\n        resource_id: $.payload.backup_id\n        metadata:\n          display_name: $.payload.display_name\n      - name: $.payload.metrics.[*].name\n        event_type: 'magnum.bay.metrics.*'\n        type: 'gauge'\n        unit: $.payload.metrics.[*].unit\n        volume: $.payload.metrics.[*].value\n        user_id: $.payload.user_id\n        project_id: $.payload.project_id\n        resource_id: $.payload.resource_id\n        lookup: ['name', 'unit', 'volume']\n      - name: $.payload.measurements.[*].metric.[*].name\n        event_type: 'objectstore.http.request'\n        type: 'delta'\n        unit: $.payload.measurements.[*].metric.[*].unit\n        volume: $.payload.measurements.[*].result\n        resource_id: $.payload.target.id\n        user_id: $.payload.initiator.id\n        project_id: $.payload.initiator.project_id\n        lookup: ['name', 'unit', 'volume']\n      - name: 'memory'\n        event_type: 'compute.instance.*'\n        type: 'gauge'\n        unit: 'MB'\n        volume: $.payload.memory_mb\n        user_id: $.payload.user_id\n        project_id: $.payload.tenant_id\n        resource_id: $.payload.instance_id\n        user_metadata: $.payload.metadata\n        metadata: &instance_meta\n          host: $.payload.host\n          flavor_id: $.payload.instance_flavor_id\n          flavor_name: $.payload.instance_type\n          display_name: $.payload.display_name\n          image_ref: $.payload.image_meta.base_image_ref\n      - name: 'vcpus'\n        event_type: 'compute.instance.*'\n        
type: 'gauge'\n        unit: 'vcpu'\n        volume: $.payload.vcpus\n        user_id: $.payload.user_id\n        project_id: $.payload.tenant_id\n        resource_id: $.payload.instance_id\n        user_metadata: $.payload.metadata\n        metadata:\n          <<: *instance_meta\n      - name: 'compute.instance.booting.time'\n        event_type: 'compute.instance.create.end'\n        type: 'gauge'\n        unit: 'sec'\n        volume:\n          fields: [$.payload.created_at, $.payload.launched_at]\n          plugin: 'timedelta'\n        project_id: $.payload.tenant_id\n        resource_id: $.payload.instance_id\n        user_metadata: $.payload.metadata\n        metadata:\n          <<: *instance_meta\n      - name: 'disk.root.size'\n        event_type: 'compute.instance.*'\n        type: 'gauge'\n        unit: 'GB'\n        volume: $.payload.root_gb\n        user_id: $.payload.user_id\n        project_id: $.payload.tenant_id\n        resource_id: $.payload.instance_id\n        user_metadata: $.payload.metadata\n        metadata:\n          <<: *instance_meta\n      - name: 'disk.ephemeral.size'\n        event_type: 'compute.instance.*'\n        type: 'gauge'\n        unit: 'GB'\n        volume: $.payload.ephemeral_gb\n        user_id: $.payload.user_id\n        project_id: $.payload.tenant_id\n        resource_id: $.payload.instance_id\n        user_metadata: $.payload.metadata\n        metadata:\n          <<: *instance_meta\n      - name: 'bandwidth'\n        event_type: 'l3.meter'\n        type: 'delta'\n        unit: 'B'\n        volume: $.payload.bytes\n        project_id: $.payload.tenant_id\n        resource_id: $.payload.label_id\n      - name: 'compute.node.cpu.frequency'\n        event_type: 'compute.metrics.update'\n        type: 'gauge'\n        unit: 'MHz'\n        volume: $.payload.metrics[?(@.name='cpu.frequency')].value\n        resource_id: $.payload.host + \"_\" + $.payload.nodename\n        timestamp: 
$.payload.metrics[?(@.name='cpu.frequency')].timestamp\n        metadata:\n          event_type: $.event_type\n          host: $.publisher_id\n          source: $.payload.metrics[?(@.name='cpu.frequency')].source\n      - name: 'compute.node.cpu.user.time'\n        event_type: 'compute.metrics.update'\n        type: 'cumulative'\n        unit: 'ns'\n        volume: $.payload.metrics[?(@.name='cpu.user.time')].value\n        resource_id: $.payload.host + \"_\" + $.payload.nodename\n        timestamp: $.payload.metrics[?(@.name='cpu.user.time')].timestamp\n        metadata:\n          event_type: $.event_type\n          host: $.publisher_id\n          source: $.payload.metrics[?(@.name='cpu.user.time')].source\n      - name: 'compute.node.cpu.kernel.time'\n        event_type: 'compute.metrics.update'\n        type: 'cumulative'\n        unit: 'ns'\n        volume: $.payload.metrics[?(@.name='cpu.kernel.time')].value\n        resource_id: $.payload.host + \"_\" + $.payload.nodename\n        timestamp: $.payload.metrics[?(@.name='cpu.kernel.time')].timestamp\n        metadata:\n          event_type: $.event_type\n          host: $.publisher_id\n          source: $.payload.metrics[?(@.name='cpu.kernel.time')].source\n      - name: 'compute.node.cpu.idle.time'\n        event_type: 'compute.metrics.update'\n        type: 'cumulative'\n        unit: 'ns'\n        volume: $.payload.metrics[?(@.name='cpu.idle.time')].value\n        resource_id: $.payload.host + \"_\" + $.payload.nodename\n        timestamp: $.payload.metrics[?(@.name='cpu.idle.time')].timestamp\n        metadata:\n          event_type: $.event_type\n          host: $.publisher_id\n          source: $.payload.metrics[?(@.name='cpu.idle.time')].source\n      - name: 'compute.node.cpu.iowait.time'\n        event_type: 'compute.metrics.update'\n        type: 'cumulative'\n        unit: 'ns'\n        volume: $.payload.metrics[?(@.name='cpu.iowait.time')].value\n        resource_id: $.payload.host + \"_\" + 
$.payload.nodename\n        timestamp: $.payload.metrics[?(@.name='cpu.iowait.time')].timestamp\n        metadata:\n          event_type: $.event_type\n          host: $.publisher_id\n          source: $.payload.metrics[?(@.name='cpu.iowait.time')].source\n      - name: 'compute.node.cpu.kernel.percent'\n        event_type: 'compute.metrics.update'\n        type: 'gauge'\n        unit: 'percent'\n        volume: $.payload.metrics[?(@.name='cpu.kernel.percent')].value * 100\n        resource_id: $.payload.host + \"_\" + $.payload.nodename\n        timestamp: $.payload.metrics[?(@.name='cpu.kernel.percent')].timestamp\n        metadata:\n          event_type: $.event_type\n          host: $.publisher_id\n          source: $.payload.metrics[?(@.name='cpu.kernel.percent')].source\n      - name: 'compute.node.cpu.idle.percent'\n        event_type: 'compute.metrics.update'\n        type: 'gauge'\n        unit: 'percent'\n        volume: $.payload.metrics[?(@.name='cpu.idle.percent')].value * 100\n        resource_id: $.payload.host + \"_\" + $.payload.nodename\n        timestamp: $.payload.metrics[?(@.name='cpu.idle.percent')].timestamp\n        metadata:\n          event_type: $.event_type\n          host: $.publisher_id\n          source: $.payload.metrics[?(@.name='cpu.idle.percent')].source\n      - name: 'compute.node.cpu.user.percent'\n        event_type: 'compute.metrics.update'\n        type: 'gauge'\n        unit: 'percent'\n        volume: $.payload.metrics[?(@.name='cpu.user.percent')].value * 100\n        resource_id: $.payload.host + \"_\" + $.payload.nodename\n        timestamp: $.payload.metrics[?(@.name='cpu.user.percent')].timestamp\n        metadata:\n          event_type: $.event_type\n          host: $.publisher_id\n          source: $.payload.metrics[?(@.name='cpu.user.percent')].source\n      - name: 'compute.node.cpu.iowait.percent'\n        event_type: 'compute.metrics.update'\n        type: 'gauge'\n        unit: 'percent'\n        volume: 
$.payload.metrics[?(@.name='cpu.iowait.percent')].value * 100\n        resource_id: $.payload.host + \"_\" + $.payload.nodename\n        timestamp: $.payload.metrics[?(@.name='cpu.iowait.percent')].timestamp\n        metadata:\n          event_type: $.event_type\n          host: $.publisher_id\n          source: $.payload.metrics[?(@.name='cpu.iowait.percent')].source\n      - name: 'compute.node.cpu.percent'\n        event_type: 'compute.metrics.update'\n        type: 'gauge'\n        unit: 'percent'\n        volume: $.payload.metrics[?(@.name='cpu.percent')].value * 100\n        resource_id: $.payload.host + \"_\" + $.payload.nodename\n        timestamp: $.payload.metrics[?(@.name='cpu.percent')].timestamp\n        metadata:\n          event_type: $.event_type\n          host: $.publisher_id\n          source: $.payload.metrics[?(@.name='cpu.percent')].source\n      - name: $.payload.outcome - $.payload.outcome + 'identity.authenticate.' + $.payload.outcome\n        type: 'delta'\n        unit: 'user'\n        volume: 1\n        event_type:\n          - 'identity.authenticate'\n        resource_id: $.payload.initiator.id\n        user_id: $.payload.initiator.id\n      - name: 'dns.domain.exists'\n        event_type: 'dns.domain.exists'\n        type: 'cumulative'\n        unit: 's'\n        volume:\n          fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending]\n          plugin: 'timedelta'\n        project_id: $.payload.tenant_id\n        resource_id: $.payload.id\n        user_id: $._context_user\n        metadata:\n          status: $.payload.status\n          pool_id: $.payload.pool_id\n          host: $.publisher_id\n      - name: 'trove.instance.exists'\n        event_type: 'trove.instance.exists'\n        type: 'cumulative'\n        unit: 's'\n        volume:\n          fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending]\n          plugin: 'timedelta'\n        project_id: $.payload.tenant_id\n        resource_id: 
$.payload.instance_id\n        user_id: $.payload.user_id\n        metadata:\n          nova_instance_id: $.payload.nova_instance_id\n          state: $.payload.state\n          service_id: $.payload.service_id\n          instance_type: $.payload.instance_type\n          instance_type_id: $.payload.instance_type_id\n  polling:\n    sources:\n      - name: all_pollsters\n        interval: 600\n        meters:\n          - \"*\"\n  pipeline:\n    sinks:\n      - name: meter_sink\n        publishers:\n          - notifier://\n          - gnocchi://\n    sources:\n      - meters:\n          - \"*\"\n        name: meter_source\n        sinks:\n          - meter_sink\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - ceilometer-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - ceilometer-db-sync\n        - ceilometer-rabbit-init\n        - ceilometer-ks-user\n        - ceilometer-ks-endpoints\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: metric\n    central:\n      jobs:\n        - ceilometer-db-sync\n        - ceilometer-rabbit-init\n        - ceilometer-ks-user\n        - ceilometer-ks-endpoints\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: metric\n    ipmi:\n      jobs:\n        - ceilometer-db-sync\n        - ceilometer-rabbit-init\n        - ceilometer-ks-user\n        - ceilometer-ks-endpoints\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: metric\n    compute:\n      jobs:\n        - ceilometer-db-sync\n        - ceilometer-rabbit-init\n        - ceilometer-ks-user\n     
   - ceilometer-ks-endpoints\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: metric\n    db_sync:\n      services:\n        - endpoint: internal\n          service: metric\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n      - service: oslo_messaging\n        endpoint: internal\n    notification:\n      jobs:\n        - ceilometer-db-sync\n        - ceilometer-rabbit-init\n        - ceilometer-ks-user\n        - ceilometer-ks-endpoints\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: metric\n    tests:\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: metering\n        - endpoint: internal\n          service: metric\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: ceilometer-keystone-admin\n    ceilometer: ceilometer-keystone-user\n    test: ceilometer-keystone-test\n  oslo_messaging:\n    admin: ceilometer-rabbitmq-admin\n    ceilometer: ceilometer-rabbitmq-user\n  oci_image_registry:\n    ceilometer: ceilometer-oci-image-registry\n\nbootstrap:\n  enabled: false\n  ks_user: ceilometer\n  script: |\n    openstack token issue\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n   
   default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      ceilometer:\n        username: ceilometer\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      ceilometer:\n        role: admin\n        region_name: RegionOne\n        username: ceilometer\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      test:\n        role: admin\n        region_name: RegionOne\n        username: ceilometer-test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 80\n        internal: 5000\n  metric:\n    name: gnocchi\n    hosts:\n      default: gnocchi-api\n      public: gnocchi\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 8041\n        public: 80\n  alarming:\n    name: aodh\n    hosts:\n      default: aodh-api\n      public: aodh\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 8042\n        public: 80\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      
# authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n      ceilometer:\n        username: ceilometer\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /ceilometer\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n\npod:\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    ceilometer:\n      enabled: false\n      tolerations:\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule\n  mounts:\n    ceilometer_tests:\n      init_container: null\n      ceilometer_tests:\n        volumeMounts:\n        volumes:\n    ceilometer_compute:\n      init_container: null\n      ceilometer_compute:\n        volumeMounts:\n        volumes:\n    ceilometer_central:\n      init_container: null\n      ceilometer_central:\n        volumeMounts:\n        volumes:\n    ceilometer_ipmi:\n      init_container: null\n      ceilometer_ipmi:\n        volumeMounts:\n        volumes:\n    ceilometer_notification:\n      init_container: null\n      ceilometer_notification:\n        volumeMounts:\n        volumes:\n    
ceilometer_db_sync:\n      ceilometer_db_sync:\n        volumeMounts:\n        volumes:\n  # -- This allows users to add Kubernetes Projected Volumes to be mounted at /etc/ceilometer/ceilometer.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/daemonset/cronjob\n  ## https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    ceilometer_compute: []\n    ceilometer_central: []\n    ceilometer_ipmi: []\n    ceilometer_notification: []\n    ceilometer_db_sync: []\n  replicas:\n    central: 1\n    notification: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        compute:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n  resources:\n    enabled: true\n    compute:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    notification:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    central:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    ipmi:\n      requests:\n        memory: \"124Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n      
    cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  deployment_central: true\n  daemonset_compute: true\n  daemonset_ipmi: false\n  deployment_notification: true\n  job_bootstrap: true\n  job_db_sync: true\n  job_image_repo_sync: true\n  job_ks_user: true\n  job_rabbit_init: true\n  secret_keystone: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_api: true\n  service_ingress_api: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "ceph-adapter-rook/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Ceph Adapter Rook\nname: ceph-adapter-rook\nversion: 2025.2.0\nhome: https://github.com/ceph/ceph\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "ceph-adapter-rook/README.md",
    "content": "# Summary\nThis is the minimal set of templates necessary to make the rest\nof Openstack-Helm charts work with Ceph clusters managed by the\nRook operator. Rook operator not only deploys Ceph clusters but\nalso provides convenience when interfacing with those clusters\nvia CRDs which can be used for managing pools/keys/users etc.\nHowever Openstack-Helm charts do not utilize Rook CRDs but instead\nmanage Ceph assets like pools/keyrings/users/buckets etc. by means\nof running bootstrap scripts. Before using Openstack-Helm charts we\nhave to provision a minimal set of assets like Ceph admin key and\nCeph client config.\n\n# Usage\n```\nhelm upgrade --install ceph-adapter-rook ./ceph-adapter-rook \\\n  --namespace=openstack\n```\n\nOnce all the jobs are finished you can deploy other Openstack-Helm charts.\n"
  },
  {
    "path": "ceph-adapter-rook/templates/bin/_config-manager.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{- $envAll := . }}\n\nENDPOINTS=$(kubectl --namespace ${CEPH_CLUSTER_NAMESPACE} get configmap rook-ceph-mon-endpoints -o jsonpath='{.data.data}' | sed 's/.=//g')\n\nkubectl get cm ${CEPH_CONF_ETC} -n  ${DEPLOYMENT_NAMESPACE}  -o yaml | \\\n  sed \"s#mon_host.*#mon_host = ${ENDPOINTS}#g\" | \\\n  kubectl apply -f -\n\nkubectl get cm ${CEPH_CONF_ETC} -n  ${DEPLOYMENT_NAMESPACE}  -o yaml\n"
  },
  {
    "path": "ceph-adapter-rook/templates/bin/_key-manager.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{- $envAll := . }}\n\n# We expect rook-ceph-tools pod to be up and running\nROOK_CEPH_TOOLS_POD=$(kubectl -n ${CEPH_CLUSTER_NAMESPACE} get pods --no-headers | awk '/rook-ceph-tools/{print $1}')\nCEPH_ADMIN_KEY=$(kubectl -n ${CEPH_CLUSTER_NAMESPACE} exec ${ROOK_CEPH_TOOLS_POD} -- ceph auth ls | grep -A1 \"client.admin\" | awk '/key:/{print $2}')\n\nceph_activate_namespace() {\n  kube_namespace=$1\n  secret_type=$2\n  secret_name=$3\n  ceph_key=$4\n  {\n  cat <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: \"${secret_name}\"\n  labels:\n{{ tuple $envAll \"ceph\" \"rbd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\ntype: \"${secret_type}\"\ndata:\n  key: $( echo ${ceph_key} | base64 | tr -d '\\n' )\nEOF\n  } | kubectl apply --namespace ${kube_namespace} -f -\n}\n\nceph_activate_namespace ${DEPLOYMENT_NAMESPACE} \"kubernetes.io/rbd\" ${SECRET_NAME} \"${CEPH_ADMIN_KEY}\"\n"
  },
  {
    "path": "ceph-adapter-rook/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\ndata:\n  key-manager.sh: |\n{{ tuple \"bin/_key-manager.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  config-manager.sh: |\n{{ tuple \"bin/_config-manager.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n{{- end }}"
  },
  {
    "path": "ceph-adapter-rook/templates/configmap-etc-client.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ceph.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{/*\n{{- if empty .Values.conf.ceph.global.mon_host -}}\n{{- $monHost := tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n{{- $_ := $monHost | set .Values.conf.ceph.global \"mon_host\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceph.osd.cluster_network -}}\n{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd \"cluster_network\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceph.osd.public_network -}}\n{{- $_ := .Values.network.public | set .Values.conf.ceph.osd \"public_network\" -}}\n{{- end -}}\n*/}}\n\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ $configMapName }}\ndata:\n  ceph.conf: |\n{{ include \"helm-toolkit.utils.to_ini\" .Values.conf.ceph | indent 4 }}\n\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_etc_client }}\n{{- list  .Values.configmap_name . | include \"ceph.configmap.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-adapter-rook/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "ceph-adapter-rook/templates/job-namespace-client-ceph-config.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_namespace_client_ceph_config }}\n{{- $envAll := . }}\n\n{{- $randStringSuffix := randAlphaNum 5 | lower }}\n\n{{- $serviceAccountName := print $envAll.Release.Name \"-namespace-client-ceph-config\" }}\n{{ tuple $envAll \"namespace_client_ceph_config\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceAccountName $randStringSuffix }}\n  namespace: {{ .Values.ceph_cluster_namespace }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n    verbs:\n      - get\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceAccountName $randStringSuffix }}\n  namespace: {{ 
.Values.ceph_cluster_namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ printf \"%s-%s\" $serviceAccountName $randStringSuffix }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ $serviceAccountName }}\n  labels:\n{{ tuple $envAll \"ceph\" \"namespace-client-ceph-config\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"namespace-client-ceph-config\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" $serviceAccountName \"containerNames\" (list \"ceph-storage-keys-generator\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"namespace_client_ceph_config\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"namespace-client-ceph-config-init\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: namespace-client-ceph-config\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.namespace_client_ceph_config | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"namespace_client_ceph_config\" \"container\" \"ceph_storage_keys_generator\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: CEPH_CONF_ETC\n              value: {{ .Values.configmap_name }}\n            - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: CEPH_CLUSTER_NAMESPACE\n              value: {{ .Values.ceph_cluster_namespace }}\n          command:\n            - /tmp/config-manager.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: bin\n              mountPath: /tmp/config-manager.sh\n              subPath: config-manager.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "ceph-adapter-rook/templates/job-namespace-client-key.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_namespace_client_key }}\n{{- $envAll := . }}\n\n{{- $randStringSuffix := randAlphaNum 5 | lower }}\n\n{{- $serviceAccountName := print $envAll.Release.Name \"-namespace-client-key\" }}\n{{ tuple $envAll \"namespace-client-key\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceAccountName $randStringSuffix }}\n  namespace: {{ .Values.ceph_cluster_namespace }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods\n    verbs:\n      - get\n      - list\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods/exec\n    verbs:\n      - create\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ printf \"%s-%s\" 
$serviceAccountName $randStringSuffix }}\n  namespace: {{ .Values.ceph_cluster_namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ printf \"%s-%s\" $serviceAccountName $randStringSuffix }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ $serviceAccountName }}\n  labels:\n{{ tuple $envAll \"ceph\" \"namespace-client-key\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"namespace-client-key\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" $serviceAccountName \"containerNames\" (list \"ceph-storage-keys-generator\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"namespace-client-key\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"namespace-client-key-init\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: namespace-client-key\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.namespace_client_key | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"namespace-client-key\" \"container\" \"namespace-client-key\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: SECRET_NAME\n              value: {{ .Values.secret_name }}\n            - name: CEPH_CLUSTER_NAMESPACE\n              value: {{ .Values.ceph_cluster_namespace }}\n          command:\n            - /tmp/key-manager.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: bin\n              mountPath: /tmp/key-manager.sh\n              subPath: key-manager.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "ceph-adapter-rook/values.yaml",
    "content": "---\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    ceph_config_helper: 'quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407'\n    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy'\n    image_repo_sync: 'quay.io/airshipit/docker:27.5.0'\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\npod:\n  security_context:\n    namespace_client_key:\n      pod:\n        runAsUser: 99\n      container:\n        namespace_client_key:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  dns_policy: \"ClusterFirstWithHostNet\"\n  resources:\n    enabled: false\n    jobs:\n      namespace_client_key:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"500m\"\n      namespace_client_ceph_config:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"500m\"\n\n\nceph_cluster_namespace: ceph\n\nsecret_name: pvc-ceph-client-key\nconfigmap_name: ceph-etc\n\nconf:\n  ceph:\n    global:\n      # TODO: Get mon host from rook-ceph-mon-endpoints configmap\n      mon_host: \"will be discovered\"\n\ndependencies:\n  static:\n    namespace_client_key:\n      jobs: null\n    namespace_client_ceph_config:\n      jobs: null\n\nmanifests:\n  configmap_bin: true\n  configmap_etc_client: true\n  job_namespace_client_ceph_config: true\n  job_namespace_client_key: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #       
  - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "ceph-client/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Ceph Client\nname: ceph-client\nversion: 2025.2.0\nhome: https://github.com/ceph/ceph-client\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "ceph-client/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "ceph-client/templates/bin/_helm-tests.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nfunction check_cluster_status() {\n  echo \"#### Start: Checking Ceph cluster status ####\"\n  ceph_status_output=$(ceph -s -f json | jq -r '.health')\n  ceph_health_status=$(echo $ceph_status_output | jq -r '.status')\n\n  if [ \"x${ceph_health_status}\" == \"xHEALTH_OK\" ]; then\n    echo \"Ceph status is HEALTH_OK\"\n  else\n    echo \"Ceph cluster status is not HEALTH_OK, checking PG states\"\n    pg_validation\n  fi\n}\n\nfunction check_recovery_flags() {\n  echo \"### Start: Checking for flags that will prevent recovery\"\n\n  # Ensure there are no flags set that will prevent recovery of degraded PGs\n  if [[ $(ceph osd stat | grep \"norecover\\|nobackfill\\|norebalance\") ]]; then\n    ceph osd stat\n    echo \"Flags are set that prevent recovery of degraded PGs\"\n    exit 1\n  fi\n}\n\nfunction check_osd_count() {\n  echo \"#### Start: Checking OSD count ####\"\n  noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')\n  osd_stat=$(ceph osd stat -f json-pretty)\n  num_osd=$(awk '/\"num_osds\"/{print $2}' <<< \"$osd_stat\" | cut -d, -f1)\n  num_in_osds=$(awk '/\"num_in_osds\"/{print $2}' <<< \"$osd_stat\" | cut -d, -f1)\n  num_up_osds=$(awk '/\"num_up_osds\"/{print $2}' <<< \"$osd_stat\" | cut -d, -f1)\n\n  MIN_OSDS=$((${num_osd}*$REQUIRED_PERCENT_OF_OSDS/100))\n  if [ ${MIN_OSDS} -lt 1 ]; then\n    MIN_OSDS=1\n  fi\n\n  if [ \"${noup_flag}\" ]; then\n    osd_status=$(ceph osd 
dump -f json | jq -c '.osds[] | .state')\n    count=0\n    for osd in $osd_status; do\n      if [[ \"$osd\" == *\"up\"* || \"$osd\" == *\"new\"* ]]; then\n        ((count=count+1))\n      fi\n    done\n    echo \"Caution: noup flag is set. ${count} OSDs in up/new state. Required number of OSDs: ${MIN_OSDS}.\"\n    if [ $MIN_OSDS -gt $count ]; then\n      exit 1\n    fi\n  else\n    if [ \"${num_osd}\" -eq 0 ]; then\n      echo \"There are no osds in the cluster\"\n      exit 1\n    elif [ \"${num_in_osds}\" -ge \"${MIN_OSDS}\" ] && [ \"${num_up_osds}\" -ge \"${MIN_OSDS}\"  ]; then\n      echo \"Required number of OSDs (${MIN_OSDS}) are UP and IN status\"\n    else\n      echo \"Required number of OSDs (${MIN_OSDS}) are NOT UP and IN status. Cluster shows OSD count=${num_osd}, UP=${num_up_osds}, IN=${num_in_osds}\"\n      exit 1\n    fi\n  fi\n}\n\nfunction check_failure_domain_count_per_pool() {\n  echo \"#### Start: Checking failure domain count per pool ####\"\n  pools=$(ceph osd pool ls)\n  for pool in ${pools}\n  do\n    crush_rule=$(ceph osd pool get ${pool} crush_rule | awk '{print $2}')\n    bucket_type=$(ceph osd crush rule dump ${crush_rule} | grep '\"type\":' | awk -F'\"' 'NR==2 {print $4}')\n    num_failure_domains=$(ceph osd tree | grep ${bucket_type} | wc -l)\n    pool_replica_size=$(ceph osd pool get ${pool} size | awk '{print $2}')\n    if [[ ${num_failure_domains} -ge ${pool_replica_size} ]]; then\n      echo \"--> Info: Pool ${pool} is configured with enough failure domains ${num_failure_domains} to satisfy pool replica size ${pool_replica_size}\"\n    else\n      echo \"--> Error : Pool ${pool} is NOT configured with enough failure domains ${num_failure_domains} to satisfy pool replica size ${pool_replica_size}\"\n      exit 1\n    fi\n  done\n}\n\nfunction mgr_validation() {\n  echo \"#### Start: MGR validation ####\"\n  mgr_dump=$(ceph mgr dump -f json-pretty)\n  echo \"Checking for ${MGR_COUNT} MGRs\"\n\n  mgr_avl=$(echo ${mgr_dump} | jq -r 
'.[\"available\"]')\n\n  if [ \"x${mgr_avl}\" == \"xtrue\" ]; then\n    mgr_active=$(echo ${mgr_dump} | jq -r '.[\"active_name\"]')\n    echo \"Out of ${MGR_COUNT}, 1 MGR is active\"\n\n    # Now lets check for standby managers\n    mgr_stdby_count=$(echo ${mgr_dump} | jq -r '.[\"standbys\"]' | jq length)\n\n    #Total MGR Count - 1 Active = Expected MGRs\n    expected_standbys=$(( MGR_COUNT -1 ))\n\n    if [ $mgr_stdby_count -eq $expected_standbys ]\n    then\n      echo \"Cluster has 1 Active MGR, $mgr_stdby_count Standbys MGR\"\n    else\n      echo \"Warning. Cluster Standbys MGR: Expected count= $expected_standbys Available=$mgr_stdby_count\"\n      echo \"If this is not expected behavior, please investigate and take some additional actions.\"\n    fi\n\n  else\n    echo \"No Active Manager found, Expected 1 MGR to be active out of ${MGR_COUNT}\"\n    retcode=1\n  fi\n\n  if [ \"x${retcode}\" == \"x1\" ]\n  then\n    exit 1\n  fi\n}\n\nfunction pool_validation() {\n\n  echo \"#### Start: Checking Ceph pools ####\"\n\n  echo \"From env variables, RBD pool replication count is: ${RBD}\"\n\n  # Assuming all pools have same replication count as RBD\n  # If RBD replication count is greater then 1, POOLMINSIZE should be 1 less then replication count\n  # If RBD replication count is not greate then 1, then POOLMINSIZE should be 1\n\n  if [ ${RBD} -gt 1 ]; then\n    EXPECTED_POOLMINSIZE=$[${RBD}-1]\n  else\n    EXPECTED_POOLMINSIZE=1\n  fi\n\n  echo \"EXPECTED_POOLMINSIZE: ${EXPECTED_POOLMINSIZE}\"\n\n  expectedCrushRuleId=\"\"\n  nrules=$(echo ${OSD_CRUSH_RULE_DUMP} | jq length)\n  c=$[nrules-1]\n  for n in $(seq 0 ${c})\n  do\n    osd_crush_rule_obj=$(echo ${OSD_CRUSH_RULE_DUMP} | jq -r .[${n}])\n\n    name=$(echo ${osd_crush_rule_obj} | jq -r .rule_name)\n    echo \"Expected Crushrule: ${EXPECTED_CRUSHRULE}, Pool Crushmap: ${name}\"\n\n    if [ \"x${EXPECTED_CRUSHRULE}\" == \"x${name}\" ]; then\n      expectedCrushRuleId=$(echo ${osd_crush_rule_obj} | jq 
.rule_id)\n      echo \"Checking against rule: id: ${expectedCrushRuleId}, name:${name}\"\n    else\n      echo \"Didn't match\"\n    fi\n  done\n  echo \"Checking cluster for size:${RBD}, min_size:${EXPECTED_POOLMINSIZE}, crush_rule:${EXPECTED_CRUSHRULE}, crush_rule_id:${expectedCrushRuleId}\"\n\n  npools=$(echo ${OSD_POOLS_DETAILS} | jq length)\n  i=$[npools - 1]\n  for n in $(seq 0 ${i})\n  do\n    pool_obj=$(echo ${OSD_POOLS_DETAILS} | jq -r \".[${n}]\")\n\n    size=$(echo ${pool_obj} | jq -r .size)\n    min_size=$(echo ${pool_obj} | jq -r .min_size)\n    pg_num=$(echo ${pool_obj} | jq -r .pg_num)\n    pg_placement_num=$(echo ${pool_obj} | jq -r .pg_placement_num)\n    crush_rule=$(echo ${pool_obj} | jq -r .crush_rule)\n    name=$(echo ${pool_obj} | jq -r .pool_name)\n    pg_autoscale_mode=$(echo ${pool_obj} | jq -r .pg_autoscale_mode)\n    if [[ \"${ENABLE_AUTOSCALER}\" == \"true\" ]]; then\n      if [[ \"${pg_autoscale_mode}\" != \"on\" ]]; then\n        echo \"pg autoscaler not enabled on ${name} pool\"\n        exit 1\n      fi\n    fi\n    if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then\n      if [ \"x${size}\" != \"x${RBD}\" ] || [ \"x${min_size}\" != \"x${EXPECTED_POOLMINSIZE}\" ] \\\n        || [ \"x${crush_rule}\" != \"x${expectedCrushRuleId}\" ]; then\n        echo \"Pool ${name} has incorrect parameters!!! Size=${size}, Min_Size=${min_size}, Rule=${crush_rule}, PG_Autoscale_Mode=${pg_autoscale_mode}\"\n        exit 1\n      else\n        echo \"Pool ${name} seems configured properly. Size=${size}, Min_Size=${min_size}, Rule=${crush_rule}, PG_Autoscale_Mode=${pg_autoscale_mode}\"\n      fi\n    else\n      if [ \"x${size}\" != \"x${RBD}\" ] || [ \"x${min_size}\" != \"x${EXPECTED_POOLMINSIZE}\" ] \\\n      || [ \"x${pg_num}\" != \"x${pg_placement_num}\" ] || [ \"x${crush_rule}\" != \"x${expectedCrushRuleId}\" ]; then\n        echo \"Pool ${name} has incorrect parameters!!! 
Size=${size}, Min_Size=${min_size}, PG=${pg_num}, PGP=${pg_placement_num}, Rule=${crush_rule}\"\n        exit 1\n      else\n        echo \"Pool ${name} seems configured properly. Size=${size}, Min_Size=${min_size}, PG=${pg_num}, PGP=${pg_placement_num}, Rule=${crush_rule}\"\n      fi\n    fi\n  done\n}\n\nfunction pool_failuredomain_validation() {\n  echo \"#### Start: Checking Pools are configured with specific failure domain ####\"\n\n  expectedCrushRuleId=\"\"\n  nrules=$(echo ${OSD_CRUSH_RULE_DUMP} | jq length)\n  c=$[nrules-1]\n  for n in $(seq 0 ${c})\n  do\n    osd_crush_rule_obj=$(echo ${OSD_CRUSH_RULE_DUMP} | jq -r .[${n}])\n\n    name=$(echo ${osd_crush_rule_obj} | jq -r .rule_name)\n\n    if [ \"x${EXPECTED_CRUSHRULE}\" == \"x${name}\" ]; then\n      expectedCrushRuleId=$(echo ${osd_crush_rule_obj} | jq .rule_id)\n      echo \"Checking against rule: id: ${expectedCrushRuleId}, name:${name}\"\n    fi\n  done\n\n  echo \"Checking OSD pools are configured with Crush rule name:${EXPECTED_CRUSHRULE}, id:${expectedCrushRuleId}\"\n\n  npools=$(echo ${OSD_POOLS_DETAILS} | jq length)\n  i=$[npools-1]\n  for p in $(seq 0 ${i})\n  do\n    pool_obj=$(echo ${OSD_POOLS_DETAILS} | jq -r \".[${p}]\")\n\n    pool_crush_rule_id=$(echo $pool_obj | jq -r .crush_rule)\n    pool_name=$(echo $pool_obj | jq -r .pool_name)\n\n    if [ \"x${pool_crush_rule_id}\" == \"x${expectedCrushRuleId}\" ]; then\n      echo \"--> Info: Pool ${pool_name} is configured with the correct rule ${pool_crush_rule_id}\"\n    else\n      echo \"--> Error : Pool ${pool_name} is NOT configured with the correct rule ${pool_crush_rule_id}\"\n      exit 1\n    fi\n  done\n}\n\nfunction check_transient_pgs_file() {\n  current_time=$1\n  pg_failed_list=()\n\n  # Remove the lines NOT having the word \"current\" as these are the old\n  # PGs that are no longer in transition.\n  sed -i '/current/!d' ${transient_pgs_file}\n\n  # For all remaining lines (PGs currently inactive), check for PGs which\n  # are 
older than the limit.\n  IFS=$'\\n' read -d '' -r -a lines < ${transient_pgs_file} || true\n  for pg_data in \"${lines[@]}\"; do\n    pg=$(echo ${pg_data} | awk '{print $1}')\n    pg_ts=$(echo ${pg_data} | awk '{print $2}')\n    if [[ $((${current_time} - ${pg_ts})) -gt ${pg_inactive_timeout} ]]; then\n      pg_failed_list+=(\"${pg}\")\n    fi\n  done\n\n  # Remove the current designation for all PGs, as we no longer need it\n  # for this check.\n  sed -i 's/ current//g' ${transient_pgs_file}\n\n  cat ${transient_pgs_file}\n  if [[ ${#pg_failed_list[@]} -gt 0 ]]; then\n    echo \"The following PGs have been in a transient state for longer than ${pg_inactive_timeout} seconds:\"\n    echo ${pg_failed_list[*]}\n    exit 1\n  fi\n}\n\nfunction update_transient_pgs_file() {\n  pg=$1\n  current_ts=$2\n\n  pg_data=$(grep \"${pg} \" ${transient_pgs_file} || true)\n  if [[ \"${pg_data}\" == \"\" ]]; then\n    echo \"${pg} ${current_ts} current\" >> ${transient_pgs_file}\n  else\n    # Add the word \"current\" to the end of the line which has this PG\n    sed -i '/^'\"${pg} \"'/s/$/ current/' ${transient_pgs_file}\n  fi\n}\n\nfunction check_transient_pgs() {\n  local -n pg_array=$1\n\n  # Use a temporary transient PGs file to track the amount of time PGs\n  # are spending in a transitional state.\n  now=$(date +%s)\n  for pg in \"${pg_array[@]}\"; do\n    update_transient_pgs_file ${pg} ${now}\n  done\n  check_transient_pgs_file ${now}\n}\n\nfunction check_pgs() {\n  pgs_transitioning=false\n\n  ceph --cluster ${CLUSTER} pg dump_stuck inactive -f json-pretty > ${stuck_pgs_file}\n\n  # Check if there are any stuck PGs, which could indicate a serious problem\n  # if it does not resolve itself soon.\n  stuck_pgs=(`cat ${stuck_pgs_file} | awk -F \"\\\"\" '/pgid/{print $4}'`)\n  if [[ ${#stuck_pgs[*]} -gt 0 ]]; then\n    # We have at least one stuck pg\n    echo \"Some PGs are stuck: \"\n    echo ${stuck_pgs[*]}\n    # Not a critical error - yet\n    pgs_transitioning=true\n\n    
# Check to see if any transitioning PG has been stuck for too long\n    check_transient_pgs stuck_pgs\n  else\n    # Examine the PGs that have non-active states. Consider those PGs that\n    # are in a \"premerge\" state to be similar to active. \"premerge\" PGs may\n    # stay in that state for several minutes, and this is considered ok.\n    ceph --cluster ${CLUSTER} pg ls -f json-pretty | grep '\"pgid\":\\|\"state\":' | grep -v -E \"active|premerge\" | grep -B1 '\"state\":' > ${inactive_pgs_file} || true\n\n    # If the inactive pgs file is non-empty, there are some inactive pgs in the cluster.\n    inactive_pgs=(`cat ${inactive_pgs_file} | awk -F \"\\\"\" '/pgid/{print $4}'`)\n    echo \"This is the list of inactive pgs in the cluster: \"\n    echo ${inactive_pgs[*]}\n\n    echo \"Checking to see if the cluster is rebalancing or recovering some PG's...\"\n\n    # Check for PGs that are down. These are critical errors.\n    down_pgs=(`cat ${inactive_pgs_file} | grep -B1 'down' | awk -F \"\\\"\" '/pgid/{print $4}'`)\n    if [[ ${#down_pgs[*]} -gt 0 ]]; then\n      # Some PGs could be down. This is really bad situation and test must fail.\n      echo \"Some PGs are down: \"\n      echo ${down_pgs[*]}\n      echo \"This is critical error, exiting. \"\n      exit 1\n    fi\n\n    # Check for PGs that are in some transient state due to rebalancing,\n    # peering or backfilling. If we see other states which are not in the\n    # following list of states, then we likely have a problem and need to\n    # exit.\n    transient_states='peer|recover|activating|creating|unknown'\n    non_transient_pgs=(`cat ${inactive_pgs_file} | grep '\"state\":' | grep -v -E \"${transient_states}\" || true`)\n    if [[ ${#non_transient_pgs[*]} -gt 0 ]]; then\n      # Some PGs could be inactive and not peering. 
Better we fail.\n      echo \"We don't have down/stuck PGs, but we have some inactive pgs that\"\n      echo \"are not in the list of allowed transient states: \"\n      pg_list=(`sed -n '/peer\\|recover\\|activating\\|creating\\|unknown/{s/.*//;x;d;};x;p;${x;p;}' ${inactive_pgs_file} | sed '/^$/d' | awk -F \"\\\"\" '/pgid/{print $4}'`)\n      echo ${pg_list[*]}\n      echo ${non_transient_pgs[*]}\n      # Critical error. Fail/exit the script\n      exit 1\n    fi\n\n    # Check and note which PGs are in a transient state. This script\n    # will allow these transient states for a period of time\n    # (time_between_retries * max_retries seconds).\n    transient_pgs=(`cat ${inactive_pgs_file} | grep -B1 -E \"${transient_states}\" | awk -F \"\\\"\" '/pgid/{print $4}'`)\n    if [[ ${#transient_pgs[*]} -gt 0 ]]; then\n      # Some PGs are not in an active state but peering and/or cluster is recovering\n      echo \"Some PGs are peering and/or cluster is recovering: \"\n      echo ${transient_pgs[*]}\n      echo \"This is normal but will wait a while to verify the PGs are not stuck in a transient state.\"\n      # not critical, just wait\n      pgs_transitioning=true\n\n      # Check to see if any transitioning PG has been stuck for too long\n      check_transient_pgs transient_pgs\n    fi\n  fi\n}\n\nfunction pg_validation() {\n  retries=0\n  time_between_retries=3\n  max_retries=60\n  pg_inactive_timeout=30\n  pgs_transitioning=false\n  stuck_pgs_file=$(mktemp -p /tmp)\n  inactive_pgs_file=$(mktemp -p /tmp)\n  transient_pgs_file=$(mktemp -p /tmp)\n\n  # Check this over a period of retries. 
Fail/stop if any critical errors found.\n  while check_pgs && [[ \"${pgs_transitioning}\" == \"true\" ]] && [[ retries -lt ${max_retries} ]]; do\n    echo \"Sleep for a bit waiting on the pg(s) to become active/unstuck...\"\n    sleep ${time_between_retries}\n    ((retries=retries+1))\n  done\n\n  # Check if transitioning PGs have gone active after retries have expired\n  if [[ retries -ge ${max_retries} ]]; then\n    ((timeout_sec=${time_between_retries}*${max_retries}))\n    echo \"Some PGs have not become active after ${timeout_sec} seconds. Exiting...\"\n    # This is ok, as the autoscaler might still be adjusting the PGs.\n  fi\n}\n\nfunction check_ceph_osd_crush_weight(){\n  OSDS_WITH_ZERO_WEIGHT=(`ceph --cluster ${CLUSTER} osd df -f json-pretty | awk -F\"[, ]*\" '/\"crush_weight\":/{if ($3 == 0) print $3}'`)\n  if [[ ${#OSDS_WITH_ZERO_WEIGHT[*]} -eq 0 ]]; then\n    echo \"All OSDs from namespace have crush weight!\"\n  else\n    echo \"OSDs from namespace have zero crush weight\"\n    exit 1\n  fi\n}\n\ncheck_osd_count\nmgr_validation\n\nOSD_POOLS_DETAILS=$(ceph osd pool ls detail -f json-pretty)\nOSD_CRUSH_RULE_DUMP=$(ceph osd crush rule dump -f json-pretty)\nPG_STAT=$(ceph pg stat -f json-pretty)\n\nceph -s\npg_validation\npool_validation\npool_failuredomain_validation\ncheck_failure_domain_count_per_pool\ncheck_cluster_status\ncheck_recovery_flags\ncheck_ceph_osd_crush_weight\n"
  },
  {
    "path": "ceph-client/templates/bin/_init-dirs.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport LC_ALL=C\n: \"${HOSTNAME:=$(uname -n)}\"\n: \"${MGR_NAME:=${HOSTNAME}}\"\n: \"${MDS_NAME:=mds-${HOSTNAME}}\"\n: \"${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}\"\n: \"${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}\"\n\nfor keyring in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING}; do\n  mkdir -p \"$(dirname \"$keyring\")\"\ndone\n\n# Let's create the ceph directories\nfor DIRECTORY in mds tmp mgr crash; do\n  mkdir -p \"/var/lib/ceph/${DIRECTORY}\"\ndone\n\n# Create socket directory\nmkdir -p /run/ceph\n\n# Create the MDS directory\nmkdir -p \"/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}\"\n\n# Create the MGR directory\nmkdir -p \"/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}\"\n\n# Adjust the owner of all those directories\nchown -R ceph. /run/ceph/ /var/lib/ceph/*\n"
  },
  {
    "path": "ceph-client/templates/bin/mds/_start.sh.tpl",
    "content": "#!/bin/bash\nset -ex\nexport LC_ALL=C\n: \"${HOSTNAME:=$(uname -n)}\"\n: \"${CEPHFS_CREATE:=0}\"\n: \"${CEPHFS_NAME:=cephfs}\"\n: \"${CEPHFS_DATA_POOL:=${CEPHFS_NAME}_data}\"\n: \"${CEPHFS_DATA_POOL_PG:=8}\"\n: \"${CEPHFS_METADATA_POOL:=${CEPHFS_NAME}_metadata}\"\n: \"${CEPHFS_METADATA_POOL_PG:=8}\"\n: \"${MDS_NAME:=mds-${HOSTNAME}}\"\n: \"${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}\"\n: \"${MDS_KEYRING:=/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}/keyring}\"\n: \"${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}\"\n: \"${CEPH_CONF:=\"/etc/ceph/${CLUSTER}.conf\"}\"\n\n{{ include \"helm-toolkit.snippets.mon_host_from_k8s_ep\" . }}\n\nif [[ ! -e ${CEPH_CONF}.template ]]; then\n  echo \"ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon\"\n  exit 1\nelse\n  ENDPOINT=$(mon_host_from_k8s_ep \"${NAMESPACE}\" ceph-mon-discovery)\n  if [[ \"${ENDPOINT}\" == \"\" ]]; then\n    /bin/sh -c -e \"cat ${CEPH_CONF}.template | tee ${CEPH_CONF}\" || true\n  else\n    /bin/sh -c -e \"cat ${CEPH_CONF}.template | sed 's#mon_host.*#mon_host = ${ENDPOINT}#g' | tee ${CEPH_CONF}\" || true\n  fi\nfi\n\n# Check to see if we are a new MDS\nif [ ! -e \"${MDS_KEYRING}\" ]; then\n\n  if [ -e \"${ADMIN_KEYRING}\" ]; then\n     KEYRING_OPT=(--name client.admin --keyring \"${ADMIN_KEYRING}\")\n  elif [ -e \"${MDS_BOOTSTRAP_KEYRING}\" ]; then\n     KEYRING_OPT=(--name client.bootstrap-mds --keyring \"${MDS_BOOTSTRAP_KEYRING}\")\n  else\n    echo \"ERROR- Failed to bootstrap MDS: could not find admin or bootstrap-mds keyring.  
You can extract it from your current monitor by running 'ceph auth get client.bootstrap-mds -o ${MDS_BOOTSTRAP_KEYRING}\"\n    exit 1\n  fi\n\n  timeout 10 ceph --cluster \"${CLUSTER}\" \"${KEYRING_OPT[@]}\" health || exit 1\n\n  # Generate the MDS key\n  ceph --cluster \"${CLUSTER}\" \"${KEYRING_OPT[@]}\" auth get-or-create \"mds.${MDS_NAME}\" osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o \"${MDS_KEYRING}\"\n  chown ceph. \"${MDS_KEYRING}\"\n  chmod 600 \"${MDS_KEYRING}\"\n\nfi\n\n# NOTE (leseb): having the admin keyring is really a security issue\n# If we need to bootstrap a MDS we should probably create the following on the monitors\n# I understand that this handy to do this here\n# but having the admin key inside every container is a concern\n\n# Create the Ceph filesystem, if necessary\nif [ $CEPHFS_CREATE -eq 1 ]; then\n\n  if [[ ! -e ${ADMIN_KEYRING} ]]; then\n      echo \"ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon\"\n      exit 1\n  fi\n\n  if [[ \"$(ceph --cluster \"${CLUSTER}\" fs ls | grep -c name:.${CEPHFS_NAME},)\" -eq 0 ]]; then\n     # Make sure the specified data pool exists\n     if ! ceph --cluster \"${CLUSTER}\" osd pool stats ${CEPHFS_DATA_POOL} > /dev/null 2>&1; then\n        ceph --cluster \"${CLUSTER}\" osd pool create ${CEPHFS_DATA_POOL} ${CEPHFS_DATA_POOL_PG}\n     fi\n\n     # Make sure the specified metadata pool exists\n     if ! ceph --cluster \"${CLUSTER}\" osd pool stats ${CEPHFS_METADATA_POOL} > /dev/null 2>&1; then\n        ceph --cluster \"${CLUSTER}\" osd pool create ${CEPHFS_METADATA_POOL} ${CEPHFS_METADATA_POOL_PG}\n     fi\n\n     ceph --cluster \"${CLUSTER}\" fs new ${CEPHFS_NAME} ${CEPHFS_METADATA_POOL} ${CEPHFS_DATA_POOL}\n  fi\nfi\n\n# NOTE: prefixing this with exec causes it to die (commit suicide)\n/usr/bin/ceph-mds \\\n  --cluster \"${CLUSTER}\" \\\n  --setuser \"ceph\" \\\n  --setgroup \"ceph\" \\\n  -d \\\n  -i \"${MDS_NAME}\"\n"
  },
  {
    "path": "ceph-client/templates/bin/pool/_calc.py.tpl",
    "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n#NOTE(portdirect): this is a simple approximation of https://ceph.com/pgcalc/\n\nimport math\nimport sys\n\nreplication = int(sys.argv[1])\nnumber_of_osds = int(sys.argv[2])\npercentage_data = float(sys.argv[3])\ntarget_pgs_per_osd = int(sys.argv[4])\n\nraw_pg_num_opt = target_pgs_per_osd * number_of_osds \\\n    * (math.ceil(percentage_data) / 100.0) / replication\n\nraw_pg_num_min = number_of_osds / replication\n\nif raw_pg_num_min >= raw_pg_num_opt:\n    raw_pg_num = raw_pg_num_min\nelse:\n    raw_pg_num = raw_pg_num_opt\n\nmax_pg_num = int(math.pow(2, math.ceil(math.log(raw_pg_num, 2))))\nmin_pg_num = int(math.pow(2, math.floor(math.log(raw_pg_num, 2))))\n\nif min_pg_num >= (raw_pg_num * 0.75):\n    print(min_pg_num)\nelse:\n    print(max_pg_num)\n"
  },
  {
    "path": "ceph-client/templates/bin/pool/_init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport LC_ALL=C\n\n: \"${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}\"\n: \"${CEPH_CONF:=\"/etc/ceph/${CLUSTER}.conf\"}\"\n\n{{ include \"helm-toolkit.snippets.mon_host_from_k8s_ep\" . }}\n\nif [[ ! -e ${CEPH_CONF}.template ]]; then\n  echo \"ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon\"\n  exit 1\nelse\n  ENDPOINT=$(mon_host_from_k8s_ep \"${NAMESPACE}\" ceph-mon-discovery)\n  if [[ \"${ENDPOINT}\" == \"\" ]]; then\n    /bin/sh -c -e \"cat ${CEPH_CONF}.template | tee ${CEPH_CONF}\" || true\n  else\n    /bin/sh -c -e \"cat ${CEPH_CONF}.template | sed 's#mon_host.*#mon_host = ${ENDPOINT}#g' | tee ${CEPH_CONF}\" || true\n  fi\nfi\n\nif [[ ! -e ${ADMIN_KEYRING} ]]; then\n   echo \"ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon\"\n   exit 1\nfi\n\nfunction wait_for_pid() {\n  tail --pid=$1 -f /dev/null\n}\n\nfunction wait_for_pgs () {\n  echo \"#### Start: Checking pgs ####\"\n\n  pgs_ready=0\n  query='map({state: .state}) | group_by(.state) | map({state: .[0].state, count: length}) | .[] | select(.state | contains(\"active\") or contains(\"premerge\") | not)'\n\n  if [[ $(ceph mon versions | awk '/version/{print $3}' | sort -n | head -n 1 | cut -d. 
-f1) -ge 14 ]]; then\n    query=\".pg_stats | ${query}\"\n  fi\n\n  # Loop until all pgs are active\n  while [[ $pgs_ready -lt 3 ]]; do\n    pgs_state=$(ceph --cluster ${CLUSTER} pg ls -f json | jq -c \"${query}\")\n    if [[ $(jq -c '. | select(.state | contains(\"peer\") or contains(\"activating\") or contains(\"recover\") or contains(\"unknown\") or contains(\"creating\") | not)' <<< \"${pgs_state}\") ]]; then\n      # If inactive PGs aren't in the allowed set of states above, fail\n      echo \"Failure, found inactive PGs that aren't in the allowed set of states\"\n      exit 1\n    fi\n    if [[ \"${pgs_state}\" ]]; then\n      pgs_ready=0\n    else\n      (( pgs_ready+=1 ))\n    fi\n    sleep 3\n  done\n}\n\nfunction check_recovery_flags () {\n  echo \"### Start: Checking for flags that will prevent recovery\"\n\n  # Ensure there are no flags set that will prevent recovery of degraded PGs\n  if [[ $(ceph osd stat | grep \"norecover\\|nobackfill\\|norebalance\") ]]; then\n    ceph osd stat\n    echo \"Flags are set that prevent recovery of degraded PGs\"\n    exit 1\n  fi\n}\n\nfunction check_osd_count() {\n  echo \"#### Start: Checking OSD count ####\"\n  noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')\n  osd_stat=$(ceph osd stat -f json-pretty)\n  num_osd=$(awk '/\"num_osds\"/{print $2}' <<< \"$osd_stat\" | cut -d, -f1)\n  num_in_osds=$(awk '/\"num_in_osds\"/{print $2}' <<< \"$osd_stat\" | cut -d, -f1)\n  num_up_osds=$(awk '/\"num_up_osds\"/{print $2}' <<< \"$osd_stat\" | cut -d, -f1)\n\n  EXPECTED_OSDS={{.Values.conf.pool.target.osd}}\n  EXPECTED_FINAL_OSDS={{.Values.conf.pool.target.final_osd}}\n  REQUIRED_PERCENT_OF_OSDS={{.Values.conf.pool.target.required_percent_of_osds}}\n\n  if [ ${num_up_osds} -gt ${EXPECTED_FINAL_OSDS} ]; then\n    echo \"More running OSDs (${num_up_osds}) than expected (${EXPECTED_FINAL_OSDS}). 
Please correct the expected value (.Values.conf.pool.target.final_osd).\"\n    exit 1\n  fi\n\n  MIN_OSDS=$(($EXPECTED_OSDS*$REQUIRED_PERCENT_OF_OSDS/100))\n  if [ ${MIN_OSDS} -lt 1 ]; then\n    MIN_OSDS=1\n  fi\n\n  if [ \"${noup_flag}\" ]; then\n    osd_status=$(ceph osd dump -f json | jq -c '.osds[] | .state')\n    count=0\n    for osd in $osd_status; do\n      if [[ \"$osd\" == *\"up\"* || \"$osd\" == *\"new\"* ]]; then\n        ((count=count+1))\n      fi\n    done\n    echo \"Caution: noup flag is set. ${count} OSDs in up/new state. Required number of OSDs: ${MIN_OSDS}.\"\n    if [ $MIN_OSDS -gt $count ]; then\n      exit 1\n    fi\n  else\n    if [ \"${num_osd}\" -eq 0 ]; then\n      echo \"There are no osds in the cluster\"\n      exit 1\n    elif [ \"${num_in_osds}\" -ge \"${MIN_OSDS}\" ] && [ \"${num_up_osds}\" -ge \"${MIN_OSDS}\"  ]; then\n      echo \"Required number of OSDs (${MIN_OSDS}) are UP and IN status\"\n    else\n      echo \"Required number of OSDs (${MIN_OSDS}) are NOT UP and IN status. Cluster shows OSD count=${num_osd}, UP=${num_up_osds}, IN=${num_in_osds}\"\n      exit 1\n    fi\n  fi\n}\n\nfunction create_crushrule () {\n  CRUSH_NAME=$1\n  CRUSH_RULE=$2\n  CRUSH_FAILURE_DOMAIN=$3\n  CRUSH_DEVICE_CLASS=$4\n  if ! ceph --cluster \"${CLUSTER}\" osd crush rule ls | grep -q \"^\\$CRUSH_NAME$\"; then\n    ceph --cluster \"${CLUSTER}\" osd crush rule $CRUSH_RULE $CRUSH_NAME default $CRUSH_FAILURE_DOMAIN $CRUSH_DEVICE_CLASS || true\n  fi\n}\n\n# Set mons to use the msgr2 protocol on nautilus\nif [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. 
-f1) -ge 14 ]]; then\n  ceph --cluster \"${CLUSTER}\" mon enable-msgr2\nfi\n\ncheck_osd_count\n{{- range $crush_rule := .Values.conf.pool.crush_rules -}}\n{{- with $crush_rule }}\ncreate_crushrule {{ .name }} {{ .crush_rule }} {{ .failure_domain }} {{ .device_class }}\n{{- end }}\n{{- end }}\n\nfunction reweight_osds () {\n  OSD_DF_OUTPUT=$(ceph --cluster \"${CLUSTER}\" osd df --format json-pretty)\n  for OSD_ID in $(ceph --cluster \"${CLUSTER}\" osd ls); do\n    OSD_EXPECTED_WEIGHT=$(echo \"${OSD_DF_OUTPUT}\" | grep -A7 \"\\bosd.${OSD_ID}\\b\" | awk '/\"kb\"/{ gsub(\",\",\"\"); d= $2/1073741824 ; r = sprintf(\"%.2f\", d); print r }');\n    OSD_WEIGHT=$(echo \"${OSD_DF_OUTPUT}\" | grep -A3 \"\\bosd.${OSD_ID}\\b\" | awk '/crush_weight/{print $2}' | cut -d',' -f1)\n    if [[ \"${OSD_EXPECTED_WEIGHT}\" != \"0.00\" ]] && [[ \"${OSD_WEIGHT}\" != \"${OSD_EXPECTED_WEIGHT}\" ]]; then\n      ceph --cluster \"${CLUSTER}\" osd crush reweight osd.${OSD_ID} ${OSD_EXPECTED_WEIGHT};\n    fi\n  done\n}\n\nfunction enable_autoscaling () {\n  CEPH_MAJOR_VERSION=$(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1)\n\n  if [[ ${CEPH_MAJOR_VERSION} -ge 16 ]]; then\n    # Pacific introduced the noautoscale flag to make this simpler\n    ceph osd pool unset noautoscale\n  else\n    if [[ ${CEPH_MAJOR_VERSION} -eq 14 ]]; then\n      ceph mgr module enable pg_autoscaler # only required for nautilus\n    fi\n    ceph config set global osd_pool_default_pg_autoscale_mode on\n  fi\n}\n\nfunction disable_autoscaling () {\n  CEPH_MAJOR_VERSION=$(ceph mgr versions | awk '/version/{print $3}' | cut -d. 
-f1)\n\n  if [[ ${CEPH_MAJOR_VERSION} -ge 16 ]]; then\n    # Pacific introduced the noautoscale flag to make this simpler\n    ceph osd pool set noautoscale\n  else\n    if [[ ${CEPH_MAJOR_VERSION} -eq 14 ]]; then\n      ceph mgr module disable pg_autoscaler # only required for nautilus\n    fi\n    ceph config set global osd_pool_default_pg_autoscale_mode off\n  fi\n}\n\nfunction set_cluster_flags () {\n  if [[ -n \"${CLUSTER_SET_FLAGS}\" ]]; then\n    for flag in ${CLUSTER_SET_FLAGS}; do\n      ceph osd set ${flag}\n    done\n  fi\n}\n\nfunction unset_cluster_flags () {\n  if [[ -n \"${CLUSTER_UNSET_FLAGS}\" ]]; then\n    for flag in ${CLUSTER_UNSET_FLAGS}; do\n      ceph osd unset ${flag}\n    done\n  fi\n}\n\nfunction run_cluster_commands () {\n  {{- range .Values.conf.features.cluster_commands }}\n    ceph --cluster \"${CLUSTER}\" {{ . }}\n  {{- end }}\n}\n\n# Helper function to set pool properties only if the target value differs from\n# the current value to optimize performance\nfunction set_pool_property() {\n  POOL_NAME=$1\n  PROPERTY_NAME=$2\n  CURRENT_PROPERTY_VALUE=$3\n  TARGET_PROPERTY_VALUE=$4\n  REALLY_MEAN_IT=\"\"\n\n  if [[ \"${PROPERTY_NAME}\" == \"size\" ]]; then\n    REALLY_MEAN_IT=\"--yes-i-really-mean-it\"\n  fi\n\n  if [[ \"${CURRENT_PROPERTY_VALUE}\" != \"${TARGET_PROPERTY_VALUE}\" ]]; then\n    ceph --cluster \"${CLUSTER}\" osd pool set \"${POOL_NAME}\" \"${PROPERTY_NAME}\" \"${TARGET_PROPERTY_VALUE}\" ${REALLY_MEAN_IT}\n  fi\n\n  echo \"${TARGET_PROPERTY_VALUE}\"\n}\n\nfunction create_pool () {\n  POOL_APPLICATION=$1\n  POOL_NAME=$2\n  POOL_REPLICATION=$3\n  POOL_PLACEMENT_GROUPS=$4\n  POOL_CRUSH_RULE=$5\n  POOL_PROTECTION=$6\n  PG_NUM_MIN=$7\n  if ! 
ceph --cluster \"${CLUSTER}\" osd pool stats \"${POOL_NAME}\" > /dev/null 2>&1; then\n    if [[ ${POOL_PLACEMENT_GROUPS} -gt 0 ]]; then\n      ceph --cluster \"${CLUSTER}\" osd pool create \"${POOL_NAME}\" ${POOL_PLACEMENT_GROUPS}\n    else\n      ceph --cluster \"${CLUSTER}\" osd pool create \"${POOL_NAME}\" ${PG_NUM_MIN} --pg-num-min ${PG_NUM_MIN}\n    fi\n    while [ $(ceph --cluster \"${CLUSTER}\" -s | grep creating -c) -gt 0 ]; do echo -n .;sleep 1; done\n    ceph --cluster \"${CLUSTER}\" osd pool application enable \"${POOL_NAME}\" \"${POOL_APPLICATION}\"\n  fi\n\n  # 'tr' and 'awk' are needed here to strip off text that is echoed before the JSON string.\n  # In some cases, errors/warnings are written to stdout and the JSON doesn't parse correctly.\n  pool_values=$(ceph --cluster \"${CLUSTER}\" osd pool get \"${POOL_NAME}\" all -f json | tr -d '\\n' | awk -F{ '{print \"{\" $2}')\n\n  if [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then\n    if [[ \"${ENABLE_AUTOSCALER}\" == \"true\" ]]; then\n      pg_num=$(jq -r '.pg_num' <<< \"${pool_values}\")\n      pgp_num=$(jq -r '.pgp_num' <<< \"${pool_values}\")\n      pg_num_min=$(jq -r '.pg_num_min' <<< \"${pool_values}\")\n      pg_autoscale_mode=$(jq -r '.pg_autoscale_mode' <<< \"${pool_values}\")\n      # set pg_num_min to PG_NUM_MIN before enabling autoscaler\n      if [[ ${pg_num} -lt ${PG_NUM_MIN} ]]; then\n        pg_autoscale_mode=$(set_pool_property \"${POOL_NAME}\" pg_autoscale_mode \"${pg_autoscale_mode}\" \"off\")\n        pg_num=$(set_pool_property \"${POOL_NAME}\" pg_num \"${pg_num}\" \"${PG_NUM_MIN}\")\n        pgp_num=$(set_pool_property \"${POOL_NAME}\" pgp_num \"${pgp_num}\" \"${PG_NUM_MIN}\")\n      fi\n      pg_num_min=$(set_pool_property \"${POOL_NAME}\" pg_num_min \"${pg_num_min}\" \"${PG_NUM_MIN}\")\n      pg_autoscale_mode=$(set_pool_property \"${POOL_NAME}\" pg_autoscale_mode \"${pg_autoscale_mode}\" \"on\")\n    else\n      
pg_autoscale_mode=$(set_pool_property \"${POOL_NAME}\" pg_autoscale_mode \"${pg_autoscale_mode}\" \"off\")\n    fi\n  fi\n#\n# Make sure pool is not protected after creation AND expansion so we can manipulate its settings.\n# Final protection settings are applied once parameters (size, pg) have been adjusted.\n#\n  nosizechange=$(jq -r '.nosizechange' <<< \"${pool_values}\")\n  nopschange=$(jq -r '.nopschange' <<< \"${pool_values}\")\n  nodelete=$(jq -r '.nodelete' <<< \"${pool_values}\")\n  size=$(jq -r '.size' <<< \"${pool_values}\")\n  crush_rule=$(jq -r '.crush_rule' <<< \"${pool_values}\")\n  nosizechange=$(set_pool_property \"${POOL_NAME}\" nosizechange \"${nosizechange}\" \"false\")\n  nopgchange=$(set_pool_property \"${POOL_NAME}\" nopgchange \"${nopgchange}\" \"false\")\n  nodelete=$(set_pool_property \"${POOL_NAME}\" nodelete \"${nodelete}\" \"false\")\n  size=$(set_pool_property \"${POOL_NAME}\" size \"${size}\" \"${POOL_REPLICATION}\")\n  crush_rule=$(set_pool_property \"${POOL_NAME}\" crush_rule \"${crush_rule}\" \"${POOL_CRUSH_RULE}\")\n# set pg_num to pool\n  if [[ ${POOL_PLACEMENT_GROUPS} -gt 0 ]]; then\n    pg_num=$(jq -r \".pg_num\" <<< \"${pool_values}\")\n    pgp_num=$(jq -r \".pgp_num\" <<< \"${pool_values}\")\n    pg_num=$(set_pool_property \"${POOL_NAME}\" pg_num \"${pg_num}\" \"${POOL_PLACEMENT_GROUPS}\")\n    pgp_num=$(set_pool_property \"${POOL_NAME}\" pgp_num \"${pgp_num}\" \"${POOL_PLACEMENT_GROUPS}\")\n  fi\n\n#This is to handle cluster expansion case where replication may change from intilization\n  if [ ${POOL_REPLICATION} -gt 1 ]; then\n    min_size=$(jq -r '.min_size' <<< \"${pool_values}\")\n    EXPECTED_POOLMINSIZE=$[${POOL_REPLICATION}-1]\n    min_size=$(set_pool_property \"${POOL_NAME}\" min_size \"${min_size}\" \"${EXPECTED_POOLMINSIZE}\")\n  fi\n#\n# Handling of .Values.conf.pool.target.protected:\n# Possible settings\n# - true  | 1 = Protect the pools after they get created\n# - false | 0 = Do not protect the pools once they 
get created and let Ceph defaults apply\n# - Absent    = Do not protect the pools once they get created and let Ceph defaults apply\n#\n# If protection is not requested through values.yaml, just use the Ceph defaults. With Luminous we do not\n# apply any protection to the pools when they get created.\n#\n# Note: If the /etc/ceph/ceph.conf file modifies the defaults the deployment will fail on pool creation\n# - nosizechange = Do not allow size and min_size changes on the pool\n# - nodelete     = Do not allow deletion of the pool\n#\n  if [ \"x${POOL_PROTECTION}\" == \"xtrue\" ] ||  [ \"x${POOL_PROTECTION}\" == \"x1\" ]; then\n    nosizechange=$(set_pool_property \"${POOL_NAME}\" nosizechange \"${nosizechange}\" \"true\")\n    nodelete=$(set_pool_property \"${POOL_NAME}\" nodelete \"${nodelete}\" \"true\")\n  fi\n}\n\nfunction manage_pool () {\n  POOL_APPLICATION=$1\n  POOL_NAME=$2\n  POOL_REPLICATION=$3\n  TOTAL_DATA_PERCENT=$4\n  TARGET_PG_PER_OSD=$5\n  POOL_CRUSH_RULE=$6\n  POOL_QUOTA=$7\n  POOL_PROTECTION=$8\n  CLUSTER_CAPACITY=$9\n  POOL_PG_NUM_MIN=${10}\n  TOTAL_OSDS={{.Values.conf.pool.target.osd}}\n  POOL_PLACEMENT_GROUPS=0\n  if [[ -n \"${TOTAL_DATA_PERCENT}\" ]]; then\n    if [[ \"${ENABLE_AUTOSCALER}\" == \"false\" ]] || [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. 
-f1) -lt 14 ]]; then\n      POOL_PLACEMENT_GROUPS=$(python3 /tmp/pool-calc.py ${POOL_REPLICATION} ${TOTAL_OSDS} ${TOTAL_DATA_PERCENT} ${TARGET_PG_PER_OSD})\n    fi\n  fi\n  create_pool \"${POOL_APPLICATION}\" \"${POOL_NAME}\" \"${POOL_REPLICATION}\" \"${POOL_PLACEMENT_GROUPS}\" \"${POOL_CRUSH_RULE}\" \"${POOL_PROTECTION}\" \"${POOL_PG_NUM_MIN}\"\n  ceph --cluster \"${CLUSTER}\" osd pool set-quota \"${POOL_NAME}\" max_bytes $POOL_QUOTA\n}\n\n# Helper to convert TiB, TB, GiB, GB, MiB, MB, KiB, KB, or bytes to bytes\nfunction convert_to_bytes() {\n  value=${1}\n  value=\"$(echo \"${value}\" | sed 's/TiB/ \\* 1024GiB/g')\"\n  value=\"$(echo \"${value}\" | sed 's/TB/ \\* 1000GB/g')\"\n  value=\"$(echo \"${value}\" | sed 's/GiB/ \\* 1024MiB/g')\"\n  value=\"$(echo \"${value}\" | sed 's/GB/ \\* 1000MB/g')\"\n  value=\"$(echo \"${value}\" | sed 's/MiB/ \\* 1024KiB/g')\"\n  value=\"$(echo \"${value}\" | sed 's/MB/ \\* 1000KB/g')\"\n  value=\"$(echo \"${value}\" | sed 's/KiB/ \\* 1024/g')\"\n  value=\"$(echo \"${value}\" | sed 's/KB/ \\* 1000/g')\"\n  python3 -c \"print(int(${value}))\"\n}\n\nset_cluster_flags\nunset_cluster_flags\nrun_cluster_commands\nreweight_osds\n\n{{ $targetOSDCount := .Values.conf.pool.target.osd }}\n{{ $targetFinalOSDCount := .Values.conf.pool.target.final_osd }}\n{{ $targetPGperOSD := .Values.conf.pool.target.pg_per_osd }}\n{{ $crushRuleDefault := .Values.conf.pool.default.crush_rule }}\n{{ $targetQuota := .Values.conf.pool.target.quota | default 100 }}\n{{ $targetProtection := .Values.conf.pool.target.protected | default \"false\" | quote | lower }}\n{{ $targetPGNumMin := .Values.conf.pool.target.pg_num_min }}\ncluster_capacity=$(ceph --cluster \"${CLUSTER}\" df -f json-pretty | grep '\"total_bytes\":' | head -n1 | awk '{print $2}' | tr -d ',')\n\n# Check to make sure pool quotas don't exceed the expected cluster capacity in its final state\ntarget_quota=$(python3 -c \"print(int(${cluster_capacity} * {{ $targetFinalOSDCount }} / {{ $targetOSDCount 
}} * {{ $targetQuota }} / 100))\")\nquota_sum=0\n\n{{- range $pool := .Values.conf.pool.spec -}}\n{{- with $pool }}\n# Read the pool quota from the pool spec (no quota if absent)\n# Set pool_quota to 0 if target_quota is 0\n[[ ${target_quota} -eq 0 ]] && pool_quota=0 || pool_quota=\"$(convert_to_bytes {{ .pool_quota | default 0 }})\"\nquota_sum=$(python3 -c \"print(int(${quota_sum} + (${pool_quota} * {{ .replication }})))\")\n{{- end }}\n{{- end }}\n\nif [[ ${quota_sum} -gt ${target_quota} ]]; then\n  echo \"The sum of all pool quotas exceeds the target quota for the cluster\"\n  exit 1\nfi\n\nif [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]] && [[ \"${ENABLE_AUTOSCALER}\" != \"true\" ]]; then\n  disable_autoscaling\nfi\n\n# Track the manage_pool() PIDs in an array so we can wait for them to finish\nMANAGE_POOL_PIDS=()\n\n{{- range $pool := .Values.conf.pool.spec -}}\n{{- with $pool }}\npool_name=\"{{ .name }}\"\n{{- if .rename }}\n# If a renamed pool exists, that name should be used for idempotence\nif [[ -n \"$(ceph --cluster ${CLUSTER} osd pool ls | grep ^{{ .rename }}$)\" ]]; then\n  pool_name=\"{{ .rename }}\"\nfi\n{{- end }}\n# Read the pool quota from the pool spec (no quota if absent)\n# Set pool_quota to 0 if target_quota is 0\n[[ ${target_quota} -eq 0 ]] && pool_quota=0 || pool_quota=\"$(convert_to_bytes {{ .pool_quota | default 0 }})\"\npool_crush_rule=\"{{ $crushRuleDefault }}\"\n{{- if .crush_rule }}\npool_crush_rule=\"{{ .crush_rule }}\"\n{{- end }}\npool_pg_num_min={{ $targetPGNumMin }}\n{{- if .pg_num_min }}\npool_pg_num_min={{ .pg_num_min }}\n{{- end }}\nmanage_pool {{ .application }} ${pool_name} {{ .replication }} {{ .percent_total_data }} {{ $targetPGperOSD }} $pool_crush_rule $pool_quota {{ $targetProtection }} ${cluster_capacity} ${pool_pg_num_min} &\nMANAGE_POOL_PID=$!\nMANAGE_POOL_PIDS+=( $MANAGE_POOL_PID )\n{{- if .rename }}\n# Wait for manage_pool() to finish for this pool before trying to rename the 
pool\nwait_for_pid $MANAGE_POOL_PID\n# If a rename value exists, the pool exists, and a pool with the rename value doesn't exist, rename the pool\npool_list=$(ceph --cluster ${CLUSTER} osd pool ls)\nif [[ -n $(grep ^{{ .name }}$ <<< \"${pool_list}\") ]] &&\n   [[ -z $(grep ^{{ .rename }}$ <<< \"${pool_list}\") ]]; then\n  ceph --cluster \"${CLUSTER}\" osd pool rename \"{{ .name }}\" \"{{ .rename }}\"\n  pool_name=\"{{ .rename }}\"\nfi\n{{- end }}\n{{- if and .delete .delete_all_pool_data }}\n# Wait for manage_pool() to finish for this pool before trying to delete the pool\nwait_for_pid $MANAGE_POOL_PID\n# If delete is set to true and delete_all_pool_data is also true, delete the pool\nif [[ \"true\" == \"{{ .delete }}\" ]] &&\n   [[ \"true\" == \"{{ .delete_all_pool_data }}\" ]]; then\n  ceph --cluster \"${CLUSTER}\" tell mon.* injectargs '--mon-allow-pool-delete=true'\n  ceph --cluster \"${CLUSTER}\" osd pool set \"${pool_name}\" nodelete false\n  ceph --cluster \"${CLUSTER}\" osd pool delete \"${pool_name}\" \"${pool_name}\" --yes-i-really-really-mean-it\n  ceph --cluster \"${CLUSTER}\" tell mon.* injectargs '--mon-allow-pool-delete=false'\nfi\n{{- end }}\n{{- end }}\n{{- end }}\n\n# Wait for all manage_pool() instances to finish before proceeding\nfor pool_pid in ${MANAGE_POOL_PIDS[@]}; do\n  wait_for_pid $pool_pid\ndone\n\nif [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]] && [[ \"${ENABLE_AUTOSCALER}\" == \"true\" ]]; then\n  enable_autoscaling\nfi\n\n{{- if .Values.conf.pool.crush.tunables }}\nceph --cluster \"${CLUSTER}\" osd crush tunables {{ .Values.conf.pool.crush.tunables }}\n{{- end }}\n\nwait_for_pgs\ncheck_recovery_flags\n"
  },
  {
    "path": "ceph-client/templates/bin/utils/_checkDNS.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n: \"${CEPH_CONF:=\"/etc/ceph/${CLUSTER}.conf\"}\"\nENDPOINT=\"{$1}\"\n\nfunction check_mon_dns () {\n  GREP_CMD=$(grep -rl 'ceph-mon' ${CEPH_CONF})\n\n  if [[ \"${ENDPOINT}\" == \"{up}\" ]]; then\n    echo \"If DNS is working, we are good here\"\n  elif [[ \"${ENDPOINT}\" != \"\" ]]; then\n    if [[ ${GREP_CMD} != \"\" ]]; then\n      # No DNS, write CEPH MONs IPs into ${CEPH_CONF}\n      sh -c -e \"cat ${CEPH_CONF}.template | sed 's/mon_host.*/mon_host = ${ENDPOINT}/g' | tee ${CEPH_CONF}\" > /dev/null 2>&1\n    else\n      echo \"endpoints are already cached in ${CEPH_CONF}\"\n      exit\n    fi\n  fi\n}\n\ncheck_mon_dns\n\nexit\n"
  },
  {
    "path": "ceph-client/templates/bin/utils/_checkDNS_start.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -xe\n\n{{ include \"helm-toolkit.snippets.mon_host_from_k8s_ep\" . }}\n\n{{- $rgwNameSpaces := \"\" }}\n{{- $sep := \"\" }}\n{{- range $_, $ns := .Values.endpoints.ceph_object_store.endpoint_namespaces }}\n  {{- $rgwNameSpaces = printf \"%s%s%s\" $rgwNameSpaces $sep $ns }}\n  {{- $sep = \" \" }}\n{{- end }}\n\nrgwNameSpaces={{- printf \"\\\"%s\\\"\" $rgwNameSpaces }}\n\nfunction check_mon_dns {\n  NS=${1}\n  # RGWs and the rgw namespace could not exist. 
Let's check this and prevent this script from failing\n  if [[ $(kubectl get ns ${NS} -o json | jq -r '.status.phase') == \"Active\" ]]; then\n    DNS_CHECK=$(getent hosts ceph-mon | head -n1)\n    PODS=$(kubectl get pods --namespace=${NS} --selector=application=ceph --field-selector=status.phase=Running \\\n          --output=jsonpath='{range .items[*]}{.metadata.name}{\"\\n\"}{end}' | grep -E 'ceph-mon|ceph-osd|ceph-mgr|ceph-mds|ceph-rgw')\n    ENDPOINT=$(mon_host_from_k8s_ep \"${NAMESPACE}\" ceph-mon-discovery)\n\n    if [[ ${PODS} == \"\" || \"${ENDPOINT}\" == \"\" ]]; then\n      echo \"Something went wrong, no PODS or ENDPOINTS are available!\"\n    elif [[ ${DNS_CHECK} == \"\" ]]; then\n      for POD in ${PODS}; do\n        kubectl exec -t ${POD} --namespace=${NS} -- \\\n        sh -c -e \"/tmp/utils-checkDNS.sh \"${ENDPOINT}\"\"\n      done\n    else\n      for POD in ${PODS}; do\n        kubectl exec -t ${POD} --namespace=${NS} -- \\\n        sh -c -e \"/tmp/utils-checkDNS.sh up\"\n      done\n    fi\n  else\n    echo \"The namespace ${NS} is not ready, yet\"\n  fi\n}\n\nfunction watch_mon_dns {\n  while [ true ]; do\n    echo \"checking DNS health\"\n    for myNS in ${NAMESPACE} ${rgwNameSpaces}; do\n      check_mon_dns ${myNS} || true\n    done\n    echo \"sleep 300 sec\"\n    sleep 300\n  done\n}\n\nwatch_mon_dns\n\nexit\n"
  },
  {
    "path": "ceph-client/templates/bin/utils/_checkPGs.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nmgrPod=$(kubectl get pods --namespace=${DEPLOYMENT_NAMESPACE} --selector=application=ceph --selector=component=mgr --output=jsonpath={.items[0].metadata.name} 2>/dev/null)\n\nkubectl exec -t ${mgrPod} --namespace=${DEPLOYMENT_NAMESPACE} -- python3 /tmp/utils-checkPGs.py All 2>/dev/null\n"
  },
  {
    "path": "ceph-client/templates/bin/utils/_defragOSDs.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nPODS=$(kubectl get pods --namespace=${NAMESPACE} \\\n  --selector=application=ceph,component=osd --field-selector=status.phase=Running \\\n  '--output=jsonpath={range .items[*]}{.metadata.name}{\"\\n\"}{end}')\n\nfor POD in ${PODS}; do\n  kubectl exec -t ${POD} -c ceph-osd-default --namespace=${NAMESPACE} -- \\\n  sh -c -e \"/tmp/utils-defragOSDs.sh\"\ndone\n\n\nexit 0\n"
  },
  {
    "path": "ceph-client/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.configmap_bin .Values.deployment.ceph }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: ceph-client-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n\n  init-dirs.sh: |\n{{ tuple \"bin/_init-dirs.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  pool-init.sh: |\n{{ tuple \"bin/pool/_init.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  pool-calc.py: |\n{{ tuple \"bin/pool/_calc.py.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n\n  mds-start.sh: |\n{{ tuple \"bin/mds/_start.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  helm-tests.sh: |\n{{ tuple \"bin/_helm-tests.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  utils-checkDNS.sh: |\n{{ tuple \"bin/utils/_checkDNS.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  utils-checkDNS_start.sh: |\n{{ tuple \"bin/utils/_checkDNS_start.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  utils-checkPGs.sh: |\n{{ tuple \"bin/utils/_checkPGs.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  utils-defragOSDs.sh: |\n{{ tuple \"bin/utils/_defragOSDs.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n{{- end }}\n"
  },
  {
    "path": "ceph-client/templates/configmap-etc-client.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ceph.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if .Values.deployment.ceph }}\n\n{{- if empty .Values.conf.ceph.global.mon_host -}}\n{{- $monHost := tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n{{- $_ := $monHost | set .Values.conf.ceph.global \"mon_host\" -}}\n{{- end -}}\n\n\n{{- if empty .Values.conf.ceph.osd.cluster_network -}}\n{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd \"cluster_network\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceph.osd.public_network -}}\n{{- $_ := .Values.network.public | set .Values.conf.ceph.osd \"public_network\" -}}\n{{- end -}}\n\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ $configMapName }}\ndata:\n  ceph.conf: |\n{{ include \"helm-toolkit.utils.to_ini\" .Values.conf.ceph | indent 4 }}\n\n{{- end }}\n{{- end }}\n{{- end }}\n{{- if .Values.manifests.configmap_etc }}\n{{- list \"ceph-client-etc\" . | include \"ceph.configmap.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-client/templates/cronjob-checkPGs.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cronjob_checkPGs }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ceph-pool-checkpgs\" }}\n{{ tuple $envAll \"pool_checkpgs\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods\n      - pods/exec\n    verbs:\n      - get\n      - list\n      - watch\n      - create\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: {{ $serviceAccountName }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ceph\" \"pool-checkpgs\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  schedule: {{ .Values.jobs.pool_checkPGs.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.pool_checkPGs.history.successJob }}\n  failedJobsHistoryLimit: {{ .Values.jobs.pool_checkPGs.history.failJob }}\n  concurrencyPolicy: {{ 
.Values.jobs.pool_checkPGs.concurrency.execPolicy }}\n  startingDeadlineSeconds: {{ .Values.jobs.pool_checkPGs.startingDeadlineSecs }}\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"pool-checkpgs\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"ceph\" \"pool-checkpgs\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n          serviceAccountName: {{ $serviceAccountName }}\n          nodeSelector:\n            {{ .Values.labels.mgr.node_selector_key }}: {{ .Values.labels.mgr.node_selector_value }}\n          initContainers:\n{{ tuple $envAll \"pool_checkpgs\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          containers:\n          - name: {{ $serviceAccountName }}\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 12 }}\n            env:\n              - name: DEPLOYMENT_NAMESPACE\n                valueFrom:\n                  fieldRef:\n                    fieldPath: metadata.namespace\n            command:\n              - /tmp/utils-checkPGs.sh\n            volumeMounts:\n              - name: pod-tmp\n                mountPath: /tmp\n              - name: pod-etc-ceph\n                mountPath: /etc/ceph\n              - name: ceph-client-bin\n                mountPath: /tmp/utils-checkPGs.sh\n                subPath: utils-checkPGs.sh\n                readOnly: true\n              - name: ceph-client-etc\n                mountPath: /etc/ceph/ceph.conf\n                subPath: ceph.conf\n                readOnly: true\n              - mountPath: /etc/ceph/ceph.client.admin.keyring\n                name: ceph-client-admin-keyring\n                readOnly: true\n                subPath: ceph.client.admin.keyring\n              - mountPath: /etc/ceph/ceph.mon.keyring.seed\n           
     name: ceph-mon-keyring\n                readOnly: true\n                subPath: ceph.mon.keyring\n              - mountPath: /var/lib/ceph/bootstrap-osd/ceph.keyring\n                name: ceph-bootstrap-osd-keyring\n                readOnly: true\n                subPath: ceph.keyring\n              - mountPath: /var/lib/ceph/bootstrap-mds/ceph.keyring\n                name: ceph-bootstrap-mds-keyring\n                readOnly: true\n                subPath: ceph.keyring\n          restartPolicy: Never\n          hostNetwork: true\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: pod-etc-ceph\n              emptyDir: {}\n            - name: ceph-client-bin\n              configMap:\n                name: ceph-client-bin\n                defaultMode: 0555\n            - name: ceph-client-etc\n              configMap:\n                name: ceph-client-etc\n                defaultMode: 0444\n            - name: ceph-client-admin-keyring\n              secret:\n                defaultMode: 420\n                secretName: ceph-client-admin-keyring\n            - name: ceph-mon-keyring\n              secret:\n                defaultMode: 420\n                secretName: ceph-mon-keyring\n            - name: ceph-bootstrap-osd-keyring\n              secret:\n                defaultMode: 420\n                secretName: ceph-bootstrap-osd-keyring\n            - name: ceph-bootstrap-mds-keyring\n              secret:\n                defaultMode: 420\n                secretName: ceph-bootstrap-mds-keyring\n\n{{- end }}\n"
  },
  {
    "path": "ceph-client/templates/cronjob-defragosds.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cronjob_defragosds }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ceph-defragosds\" }}\n{{ tuple $envAll \"ceph_defragosds\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods\n      - pods/exec\n    verbs:\n      - get\n      - list\n      - watch\n      - create\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: {{ $serviceAccountName }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ceph\" \"ceph-defragosds\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  schedule: {{ .Values.jobs.ceph_defragosds.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.ceph_defragosds.history.successJob }}\n  failedJobsHistoryLimit: {{ .Values.jobs.ceph_defragosds.history.failJob }}\n  concurrencyPolicy: {{ 
.Values.jobs.ceph_defragosds.concurrency.execPolicy }}\n  startingDeadlineSeconds: {{ .Values.jobs.ceph_defragosds.startingDeadlineSecs }}\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"ceph-defragosds\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"ceph\" \"ceph-defragosds\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n          serviceAccountName: {{ $serviceAccountName }}\n          nodeSelector:\n            {{ .Values.labels.mgr.node_selector_key }}: {{ .Values.labels.mgr.node_selector_value }}\n          containers:\n          - name: {{ $serviceAccountName }}\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 12 }}\n            env:\n              - name: NAMESPACE\n                valueFrom:\n                  fieldRef:\n                    fieldPath: metadata.namespace\n              - name: KUBECTL_PARAM\n                value: {{ tuple $envAll \"ceph\" \"ceph-defragosds\" | include \"helm-toolkit.snippets.kubernetes_kubectl_params\" }}\n            command:\n              - /tmp/utils-defragOSDs.sh\n              - cron\n            volumeMounts:\n              - name: pod-tmp\n                mountPath: /tmp\n              - name: pod-etc-ceph\n                mountPath: /etc/ceph\n              - name: ceph-client-bin\n                mountPath: /tmp/utils-defragOSDs.sh\n                subPath: utils-defragOSDs.sh\n                readOnly: true\n          restartPolicy: Never\n          hostNetwork: true\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: pod-etc-ceph\n              emptyDir: {}\n            - name: ceph-client-bin\n              configMap:\n                name: ceph-client-bin\n                defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "ceph-client/templates/deployment-checkdns.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.deployment_checkdns .Values.deployment.ceph }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ceph-checkdns\" }}\n{{/*\nWe will give a different name to the RoleBinding resource (see $cephRoleBindingName variable below).\nThis is necessary, because the RoleBinding with the default name \"ceph-checkdns\" exists in the system,\nand its reference cannot be changed.\n*/}}\n{{- $cephRoleBindingName := \"ceph-checkdns-rolebinding\" }}\n\n{{ tuple $envAll \"checkdns\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"clusterrole-checkdns\" | quote }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods\n      - endpoints\n      - pods/exec\n      - namespaces\n    verbs:\n      - get\n      - list\n      - watch\n      - create\n---\n\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ printf \"%s-for-%s\" $cephRoleBindingName $envAll.Release.Namespace }}\n  namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"clusterrole-checkdns\" | quote }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ 
$envAll.Release.Namespace }}\n---\n\nkind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: ceph-checkdns\n  annotations:\n    configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n  labels:\n{{ tuple $envAll \"ceph\" \"checkdns\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ceph\" \"checkdns\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"checkdns\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-checkdns\" \"containerNames\" (list \"ceph-checkdns\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"checkdns\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"ceph\" \"checkdns\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ tuple $envAll \"checkdns\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n      nodeSelector:\n        {{ .Values.labels.checkdns.node_selector_key }}: {{ .Values.labels.checkdns.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"checkdns\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      hostNetwork: true\n      dnsPolicy: {{ .Values.pod.dns_policy }}\n      containers:\n        - name: ceph-checkdns\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.checkdns | include 
\"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"checkdns\" \"container\" \"checkdns\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: CLUSTER\n              value: \"ceph\"\n            - name: K8S_HOST_NETWORK\n              value: \"1\"\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MON_PORT\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: MON_PORT_V2\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: KUBECTL_PARAM\n              value: {{ tuple $envAll \"ceph\" \"checkdns\" | include \"helm-toolkit.snippets.kubernetes_kubectl_params\" }}\n          command:\n            - /tmp/_start.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ceph-client-bin\n              mountPath: /tmp/_start.sh\n              subPath: utils-checkDNS_start.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: ceph-client-bin\n          configMap:\n            name: ceph-client-bin\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "ceph-client/templates/deployment-mds.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"livenessProbeTemplate\" }}\ntcpSocket:\n  port: 6800\n{{- end }}\n\n{{- define \"readinessProbeTemplate\" }}\ntcpSocket:\n  port: 6800\n{{- end }}\n\n{{- if and .Values.manifests.deployment_mds ( and .Values.deployment.ceph .Values.conf.features.mds) }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ceph-mds\" }}\n{{ tuple $envAll \"mds\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\nkind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: ceph-mds\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ceph\" \"mds\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.mds }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ceph\" \"mds\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      name: ceph-mds\n      labels:\n{{ tuple $envAll \"ceph\" \"mds\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-client-hash: {{ tuple \"configmap-etc-client.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-mds\" \"containerNames\" (list \"ceph-mds\" \"ceph-init-dirs\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"mds\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"ceph\" \"mds\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ tuple $envAll \"mds\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n      nodeSelector:\n        {{ .Values.labels.mds.node_selector_key }}: {{ .Values.labels.mds.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"mds\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: ceph-init-dirs\n{{ tuple $envAll \"ceph_mds\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"mds\" \"container\" \"init_dirs\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/init-dirs.sh\n          env:\n            - name: CLUSTER\n              value: \"ceph\"\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-client-bin\n              mountPath: /tmp/init-dirs.sh\n              subPath: init-dirs.sh\n              readOnly: true\n            - name: pod-var-lib-ceph\n              mountPath: /var/lib/ceph\n              readOnly: false\n            - name: pod-var-lib-ceph-crash\n          
    mountPath: /var/lib/ceph/crash\n              readOnly: false\n      containers:\n        - name: ceph-mds\n{{ tuple $envAll \"ceph_mds\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.mds | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"mds\" \"container\" \"mds\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/mds-start.sh\n          env:\n            - name: CLUSTER\n              value: \"ceph\"\n            - name: CEPHFS_CREATE\n              value: \"1\"\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MON_PORT\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: MON_PORT_V2\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n          ports:\n            - containerPort: 6800\n{{ dict \"envAll\" . \"component\" \"ceph\" \"container\" \"ceph-mds\" \"type\" \"liveness\" \"probeTemplate\" (include \"livenessProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"ceph\" \"container\" \"ceph-mds\" \"type\" \"readiness\" \"probeTemplate\" (include \"readinessProbeTemplate\" . 
| fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-client-bin\n              mountPath: /tmp/mds-start.sh\n              subPath: mds-start.sh\n              readOnly: true\n            - name: ceph-client-bin\n              mountPath: /tmp/utils-checkDNS.sh\n              subPath: utils-checkDNS.sh\n              readOnly: true\n            - name: ceph-client-etc\n              mountPath: /etc/ceph/ceph.conf.template\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-client-admin-keyring\n              mountPath: /etc/ceph/ceph.client.admin.keyring\n              subPath: ceph.client.admin.keyring\n              readOnly: true\n            - name: ceph-bootstrap-mds-keyring\n              mountPath: /var/lib/ceph/bootstrap-mds/ceph.keyring\n              subPath: ceph.keyring\n              readOnly: false\n            - name: pod-var-lib-ceph\n              mountPath: /var/lib/ceph\n              readOnly: false\n            - name: pod-var-lib-ceph-crash\n              mountPath: /var/lib/ceph/crash\n              readOnly: false\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-run\n          emptyDir:\n            medium: \"Memory\"\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-client-etc\n          configMap:\n            name: ceph-client-etc\n            defaultMode: 0444\n        - name: ceph-client-bin\n          configMap:\n            name: ceph-client-bin\n            defaultMode: 0555\n        - name: pod-var-lib-ceph\n          emptyDir: {}\n        - name: pod-var-lib-ceph-crash\n          hostPath:\n            path: /var/lib/openstack-helm/ceph/crash\n            type: 
DirectoryOrCreate\n        - name: ceph-client-admin-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin }}\n        - name: ceph-bootstrap-mds-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.mds }}\n{{- end }}\n"
  },
  {
    "path": "ceph-client/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "ceph-client/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ceph-client-bootstrap\" }}\n{{ tuple $envAll \"bootstrap\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: ceph-client-bootstrap\n  labels:\n{{ tuple $envAll \"ceph\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-client-bootstrap\" \"containerNames\" (list \"ceph-client-bootstrap\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      
initContainers:\n{{ tuple $envAll \"bootstrap\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: ceph-client-bootstrap\n{{ tuple $envAll \"ceph_bootstrap\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" \"container\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/bootstrap.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ceph-client-bin\n              mountPath: /tmp/bootstrap.sh\n              subPath: bootstrap.sh\n              readOnly: true\n            - name: ceph-client-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-client-admin-keyring\n              mountPath: /etc/ceph/ceph.client.admin.keyring\n              subPath: ceph.client.admin.keyring\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-client-bin\n          configMap:\n            name: ceph-client-bin\n            defaultMode: 0555\n        - name: ceph-client-etc\n          configMap:\n            name: ceph-client-etc\n            defaultMode: 0444\n        - name: ceph-client-admin-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin }}\n{{- end }}\n"
  },
  {
    "path": "ceph-client/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"ceph-client\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-client/templates/job-rbd-pool.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_rbd_pool .Values.deployment.ceph }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ceph-rbd-pool\" }}\n{{ tuple $envAll \"rbd_pool\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: ceph-rbd-pool\n  labels:\n{{ tuple $envAll \"ceph\" \"rbd-pool\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      name: ceph-rbd-pool\n      labels:\n{{ tuple $envAll \"ceph\" \"rbd-pool\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-rbd-pool\" \"containerNames\" (list \"ceph-rbd-pool\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"rbd_pool\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: {{ $envAll.Values.jobs.rbd_pool.restartPolicy | quote }}\n      affinity:\n{{ tuple $envAll \"ceph\" \"rbd-pool\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ 
$envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"rbd_pool\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: ceph-rbd-pool\n{{ tuple $envAll \"ceph_rbd_pool\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.rbd_pool | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"rbd_pool\" \"container\" \"rbd_pool\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: CLUSTER\n              value: \"ceph\"\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: ENABLE_AUTOSCALER\n              value: {{ .Values.conf.features.pg_autoscaler | quote }}\n            - name: CLUSTER_SET_FLAGS\n              value: {{ .Values.conf.features.cluster_flags.set | quote }}\n            - name: CLUSTER_UNSET_FLAGS\n              value: {{ .Values.conf.features.cluster_flags.unset | quote }}\n          command:\n            - /tmp/pool-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ceph-client-bin\n              mountPath: /tmp/pool-init.sh\n              subPath: pool-init.sh\n              readOnly: true\n            - name: ceph-client-bin\n              mountPath: /tmp/pool-calc.py\n              subPath: pool-calc.py\n              readOnly: true\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n              readOnly: false\n            - name: ceph-client-etc\n              mountPath: /etc/ceph/ceph.conf.template\n              subPath: ceph.conf\n              readOnly: true\n            - name: 
ceph-client-admin-keyring\n              mountPath: /etc/ceph/ceph.client.admin.keyring\n              subPath: ceph.client.admin.keyring\n              readOnly: true\n            - name: pod-var-lib-ceph\n              mountPath: /var/lib/ceph\n              readOnly: false\n            - name: pod-run\n              mountPath: /run\n              readOnly: false\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-client-etc\n          configMap:\n            name: ceph-client-etc\n            defaultMode: 0444\n        - name: ceph-client-bin\n          configMap:\n            name: ceph-client-bin\n            defaultMode: 0555\n        - name: pod-var-lib-ceph\n          emptyDir: {}\n        - name: pod-run\n          emptyDir:\n            medium: \"Memory\"\n        - name: ceph-client-admin-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin }}\n{{- end }}\n"
  },
  {
    "path": "ceph-client/templates/pod-helm-tests.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.helm_tests }}\n{{- $envAll := . }}\n{{- $serviceAccountName := printf \"%s-%s\" $envAll.Release.Name \"test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ $serviceAccountName }}\n  labels:\n{{ tuple $envAll \"ceph-client\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-client-test\" \"containerNames\" (list \"init\" \"ceph-cluster-helm-test\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n{{ dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  restartPolicy: Never\n  serviceAccountName: {{ $serviceAccountName }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  initContainers:\n{{ tuple $envAll \"tests\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: ceph-cluster-helm-test\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include 
\"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      env:\n        - name: CLUSTER\n          value: \"ceph\"\n        - name: CEPH_DEPLOYMENT_NAMESPACE\n          value: {{ .Release.Namespace }}\n        - name: REQUIRED_PERCENT_OF_OSDS\n          value: {{ .Values.conf.pool.target.required_percent_of_osds | ceil | quote }}\n        - name: EXPECTED_CRUSHRULE\n          value: {{ .Values.conf.pool.default.crush_rule | default \"replicated_rule\" | quote }}\n        - name: MGR_COUNT\n          value: {{ .Values.pod.replicas.mgr | default \"1\" | quote }}\n        - name: ENABLE_AUTOSCALER\n          value: {{ .Values.conf.features.pg_autoscaler | quote }}\n        {{- range $pool := .Values.conf.pool.spec -}}\n        {{- with $pool }}\n        - name: {{ .name | upper | replace \".\" \"_\" }}\n          value: {{ .replication | quote }}\n        {{- end }}\n        {{- end }}\n      command:\n        - /tmp/helm-tests.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: ceph-client-bin\n          mountPath: /tmp/helm-tests.sh\n          subPath: helm-tests.sh\n          readOnly: true\n        - name: ceph-client-admin-keyring\n          mountPath: /etc/ceph/ceph.client.admin.keyring\n          subPath: ceph.client.admin.keyring\n          readOnly: true\n        - name: ceph-client-etc\n          mountPath: /etc/ceph/ceph.conf\n          subPath: ceph.conf\n          readOnly: true\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: ceph-client-bin\n      configMap:\n        name: ceph-client-bin\n        defaultMode: 0555\n    - name: ceph-client-admin-keyring\n      secret:\n        secretName: {{ .Values.secrets.keyrings.admin }}\n    - name: ceph-client-etc\n      configMap:\n        name: ceph-client-etc\n        defaultMode: 
0444\n{{- end }}\n"
  },
  {
    "path": "ceph-client/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "ceph-client/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for ceph-client.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\ndeployment:\n  ceph: true\n\nrelease_group: null\n\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    ceph_bootstrap: 'quay.io/airshipit/ceph-daemon:ubuntu_jammy_20.2.1-1-20260407'\n    ceph_config_helper: 'quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407'\n    ceph_mds: 'quay.io/airshipit/ceph-daemon:ubuntu_jammy_20.2.1-1-20260407'\n    ceph_rbd_pool: 'quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407'\n    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy'\n    image_repo_sync: 'quay.io/airshipit/docker:27.5.0'\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  mgr:\n    node_selector_key: ceph-mgr\n    node_selector_value: enabled\n  mds:\n    node_selector_key: ceph-mds\n    node_selector_value: enabled\n  checkdns:\n    node_selector_key: ceph-mon\n    node_selector_value: enabled\n\npod:\n  security_context:\n    checkdns:\n      pod:\n        runAsUser: 65534\n      container:\n        checkdns:\n          allowPrivilegeEscalation: false\n          
readOnlyRootFilesystem: true\n    mds:\n      pod:\n        runAsUser: 65534\n      container:\n        init_dirs:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        mds:\n          runAsUser: 64045\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    bootstrap:\n      pod:\n        runAsUser: 65534\n      container:\n        bootstrap:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    rbd_pool:\n      pod:\n        runAsUser: 65534\n      container:\n        rbd_pool:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    test:\n      pod:\n        runAsUser: 65534\n      container:\n        test:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  dns_policy: \"ClusterFirstWithHostNet\"\n  replicas:\n    mds: 2\n  lifecycle:\n    upgrades:\n      deployments:\n        pod_replacement_strategy: RollingUpdate\n        revision_history: 3\n        rolling_update:\n          max_surge: 25%\n          max_unavailable: 25%\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  resources:\n    enabled: false\n    mds:\n      requests:\n        memory: \"10Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"50Mi\"\n        cpu: \"500m\"\n    checkdns:\n      requests:\n        memory: \"5Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"50Mi\"\n        cpu: \"500m\"\n    jobs:\n      bootstrap:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"500m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rbd_pool:\n        requests:\n     
     memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"10Mi\"\n          cpu: \"250m\"\n        limits:\n          memory: \"50Mi\"\n          cpu: \"500m\"\n  tolerations:\n    checkdns:\n      tolerations:\n      - effect: NoExecute\n        key: node.kubernetes.io/not-ready\n        operator: Exists\n        tolerationSeconds: 60\n      - effect: NoExecute\n        key: node.kubernetes.io/unreachable\n        operator: Exists\n        tolerationSeconds: 60\n    mds:\n      tolerations:\n      - effect: NoExecute\n        key: node.kubernetes.io/not-ready\n        operator: Exists\n        tolerationSeconds: 60\n      - effect: NoExecute\n        key: node.kubernetes.io/unreachable\n        operator: Exists\n        tolerationSeconds: 60\n  probes:\n    ceph:\n      ceph-mds:\n        readiness:\n          enabled: true\n          params:\n            timeoutSeconds: 5\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 60\n            timeoutSeconds: 5\n\nsecrets:\n  keyrings:\n    mon: ceph-mon-keyring\n    mds: ceph-bootstrap-mds-keyring\n    osd: ceph-bootstrap-osd-keyring\n    rgw: ceph-bootstrap-rgw-keyring\n    mgr: ceph-bootstrap-mgr-keyring\n    admin: ceph-client-admin-keyring\n  oci_image_registry:\n    ceph-client: ceph-client-oci-image-registry\n\nnetwork:\n  public: 192.168.0.0/16\n  cluster: 192.168.0.0/16\n\njobs:\n  ceph_defragosds:\n    # Execute the 1st of each month\n    cron: \"0 0 1 * *\"\n    history:\n      # Number of successful job to keep\n      successJob: 1\n      # Number of failed job to keep\n      failJob: 1\n    concurrency:\n      # Skip new job if previous job still active\n      execPolicy: Forbid\n    startingDeadlineSecs: 60\n  pool_checkPGs:\n    # Execute every 15 minutes\n    cron: \"*/15 * * * *\"\n    history:\n      # Number of successful job to keep\n      
successJob: 1\n      # Number of failed job to keep\n      failJob: 1\n    concurrency:\n      # Skip new job if previous job still active\n      execPolicy: Forbid\n    startingDeadlineSecs: 60\n  rbd_pool:\n    restartPolicy: OnFailure\n\nconf:\n  features:\n    mds: true\n    pg_autoscaler: true\n    cluster_flags:\n      # List of flags to set or unset separated by spaces\n      set: \"\"\n      unset: \"\"\n    cluster_commands:\n      # Add additional commands to run against the Ceph cluster here\n      # NOTE: Beginning with Pacific, mon_allow_pool_size_one must be\n      #       configured here to allow gate scripts to use 1x replication.\n      #       Adding it to /etc/ceph/ceph.conf doesn't seem to be effective.\n      - config set global mon_allow_pool_size_one true\n      - osd require-osd-release tentacle\n      - status\n  pool:\n  # NOTE(portdirect): this drives a simple approximation of\n  # https://ceph.com/pgcalc/, the `target.osd` key should be set to match the\n  # expected number of osds in a cluster, and the `target.pg_per_osd` should be\n  # set to match the desired number of placement groups on each OSD.\n    crush:\n      # NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series\n      # kernel this should be set to `hammer`\n      tunables: null\n    target:\n      # NOTE(portdirect): arbitrarily we set the default number of expected OSD's to 5\n      # to match the number of nodes in the OSH gate.\n      osd: 5\n      # This the number of OSDs expected in the final state. This is to allow the above\n      # target to be smaller initially in the event of a partial deployment. 
This way\n      # helm tests can still pass at deployment time and pool quotas can be set based on\n      # the expected final state (actual target quota = final_osd / osd * quota).\n      final_osd: 5\n      # This is  just for helm tests to proceed the deployment if  we have mentioned % of\n      # osds are up and running.\n      required_percent_of_osds: 75\n      pg_per_osd: 100\n      # NOTE(bw6938): When pools are created with the autoscaler enabled, a pg_num_min\n      # value specifies the minimum value of pg_num that the autoscaler will target.\n      # That default was recently changed from 8 to 32 which severely limits the number\n      # of pools in a small cluster per https://github.com/rook/rook/issues/5091. This change\n      # overrides the default pg_num_min value of 32 with a value of 8, matching the default\n      # pg_num value of 8.\n      pg_num_min: 8\n      protected: true\n      # NOTE(st053q): target quota should be set to the overall cluster full percentage\n      # to be tolerated as a quota (percent full to allow in order to tolerate some\n      # level of failure)\n      # Set target quota to \"0\" (must be quoted) to remove quotas for all pools\n      quota: 100\n    default:\n      # NOTE(supamatt): Accepted values are taken from `crush_rules` list.\n      crush_rule: replicated_rule\n    crush_rules:\n      # NOTE(supamatt): Device classes must remain undefined if all OSDs are the\n      # same device type of backing disks (ie, all HDD or all SDD).\n      - name: same_host\n        crush_rule: create-simple\n        failure_domain: osd\n        device_class:\n      - name: replicated_rule\n        crush_rule: create-simple\n        failure_domain: host\n        device_class:\n      - name: rack_replicated_rule\n        crush_rule: create-simple\n        failure_domain: rack\n        device_class:\n      # - name: replicated_rule-ssd\n      #   crush_rule: create-replicated\n      #   failure_domain: host\n      #   device_class: 
sdd\n      # - name: replicated_rule-hdd\n      #   crush_rule: create-replicated\n      #   failure_domain: host\n      #   device_class: hdd\n      # - name: rack_replicated_rule-ssd\n      #   crush_rule: create-replicated\n      #   failure_domain: rack\n      #   device_class: ssd\n      # - name: rack_replicated_rule-hdd\n      #   crush_rule: create-replicated\n      #   failure_domain: rack\n      #   device_class: hdd\n      # - name: row_replicated_rule\n      #   crush_rule: create-simple\n      #   failure_domain: row\n      #   device_class:\n\n    # NOTE(portdirect): this section describes the pools that will be managed by\n    # the ceph pool management job, as it tunes the pgs and crush rule, based on\n    # the above.\n    spec:\n      # Health metrics pool\n      - name: .mgr\n        application: mgr_devicehealth\n        replication: 1\n        percent_total_data: 5\n      # RBD pool\n      - name: rbd\n        # An optional \"rename\" value may be used to change the name of an existing pool.\n        # If the pool doesn't exist, it will be created and renamed. If the pool exists with\n        # the original name, it will be renamed. If the pool exists and has already been\n        # renamed, the name will not be changed. If two pools exist with the two names, the\n        # pool matching the renamed value will be configured and the other left alone.\n        # rename: rbd-new\n        # Optional \"delete\" and \"delete_all_pool_data\" values may be used to delete an\n        # existing pool. Both must exist and must be set to true in order to delete a pool.\n        # NOTE: Deleting a pool deletes all of its data and is unrecoverable. This is why\n        #       both values are required in order to delete a pool. 
Neither value does\n        #       anything by itself.\n        # delete: false\n        # delete_all_pool_data: false\n        application: rbd\n        replication: 3\n        percent_total_data: 40\n        # Example of 100 GiB pool_quota for rbd pool (no pool quota if absent)\n        # May be specified in TiB, TB, GiB, GB, MiB, MB, KiB, KB, or bytes\n        # NOTE: This should always be a string value to avoid Helm issues with large integers\n        # pool_quota: \"100GiB\"\n        # Example of an overridden pg_num_min value for a single pool\n        # pg_num_min: 32\n      # NOTE(supamatt): By default the crush rules used to create each pool will be\n      # taken from the pool default `crush_rule` unless a pool specific `crush_rule`\n      # is specified. The rule MUST exist for it to be defined here.\n      #  crush_rule: replicated_rule\n      # CephFS pools\n      - name: cephfs_metadata\n        application: cephfs\n        replication: 3\n        percent_total_data: 5\n      - name: cephfs_data\n        application: cephfs\n        replication: 3\n        percent_total_data: 10\n      # RadosGW pools\n      - name: .rgw.root\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.control\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.data.root\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.gc\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.log\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.intent-log\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.meta\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.usage\n        application: 
rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.users.keys\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.users.email\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.users.swift\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.users.uid\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.buckets.extra\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.buckets.index\n        application: rgw\n        replication: 3\n        percent_total_data: 3\n      - name: default.rgw.buckets.data\n        application: rgw\n        replication: 3\n        percent_total_data: 29\n\n  ceph:\n    global:\n      # auth\n      cephx: true\n      cephx_require_signatures: false\n      cephx_cluster_require_signatures: true\n      cephx_service_require_signatures: false\n      objecter_inflight_op_bytes: \"1073741824\"\n      objecter_inflight_ops: 10240\n      debug_ms: \"0/0\"\n      log_file: /dev/stdout\n      mon_cluster_log_file: /dev/stdout\n    osd:\n      osd_mkfs_type: xfs\n      osd_mkfs_options_xfs: -f -i size=2048\n      osd_max_object_name_len: 256\n      ms_bind_port_min: 6800\n      ms_bind_port_max: 7100\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - ceph-client-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    bootstrap:\n      jobs: null\n      services:\n        - endpoint: internal\n          service: ceph_mon\n    cephfs_client_key_generator:\n      jobs: null\n    mds:\n      jobs:\n        - ceph-storage-keys-generator\n        - ceph-mds-keyring-generator\n        - ceph-rbd-pool\n      services:\n      
  - endpoint: internal\n          service: ceph_mon\n    pool_checkpgs:\n      jobs:\n        - ceph-rbd-pool\n      services:\n        - endpoint: internal\n          service: ceph_mgr\n    checkdns:\n      services:\n        - endpoint: internal\n          service: ceph_mon\n    namespace_client_key_cleaner:\n      jobs: null\n    namespace_client_key_generator:\n      jobs: null\n    rbd_pool:\n      services:\n        - endpoint: internal\n          service: ceph_mon\n        - endpoint: internal\n          service: ceph_mgr\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    tests:\n      jobs:\n        - ceph-rbd-pool\n        - ceph-mgr-keyring-generator\n      services:\n        - endpoint: internal\n          service: ceph_mon\n        - endpoint: internal\n          service: ceph_mgr\n\nbootstrap:\n  enabled: false\n  script: |\n    ceph -s\n    function ensure_pool () {\n      ceph osd pool stats $1 || ceph osd pool create $1 $2\n      if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. 
-f1) -ge 12 ]]; then\n        ceph osd pool application enable $1 $3\n      fi\n    }\n    #ensure_pool volumes 8 cinder\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      ceph-client:\n        username: ceph-client\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  ceph_mon:\n    namespace: null\n    hosts:\n      default: ceph-mon\n      discovery: ceph-mon-discovery\n    host_fqdn_override:\n      default: null\n    port:\n      mon:\n        default: 6789\n      mon_msgr2:\n        default: 3300\n  ceph_mgr:\n    namespace: null\n    hosts:\n      default: ceph-mgr\n    host_fqdn_override:\n      default: null\n    port:\n      mgr:\n        default: 7000\n      metrics:\n        default: 9283\n    scheme:\n      default: http\n  ceph_object_store:\n    endpoint_namespaces:\n    - openstack\n    - ceph\n    # hosts:\n    #   default: ceph-rgw\n    # host_fqdn_override:\n    #   default: null\n\nmanifests:\n  configmap_bin: true\n  configmap_test_bin: true\n  configmap_etc: true\n  deployment_mds: true\n  deployment_checkdns: true\n  job_bootstrap: false\n  job_cephfs_client_key: true\n  job_image_repo_sync: true\n  job_rbd_pool: true\n  helm_tests: true\n  cronjob_checkPGs: true\n  cronjob_defragosds: true\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: 
osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "ceph-mon/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Ceph Mon\nname: ceph-mon\nversion: 2025.2.0\nhome: https://github.com/ceph/ceph\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "ceph-mon/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "ceph-mon/templates/bin/_init-dirs.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport LC_ALL=C\n: \"${HOSTNAME:=$(uname -n)}\"\n: \"${MGR_NAME:=${HOSTNAME}}\"\n: \"${MDS_NAME:=mds-${HOSTNAME}}\"\n: \"${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}\"\n: \"${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}\"\n\nfor keyring in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING} ; do\n  mkdir -p \"$(dirname \"$keyring\")\"\ndone\n\n# Let's create the ceph directories\nfor DIRECTORY in mon osd mds radosgw tmp mgr crash; do\n  mkdir -p \"/var/lib/ceph/${DIRECTORY}\"\ndone\n\n# Create socket directory\nmkdir -p /run/ceph\n\n# Create the MDS directory\nmkdir -p \"/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}\"\n\n# Create the MGR directory\nmkdir -p \"/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}\"\n\n# Adjust the owner of all those directories\nchown -R ceph. /run/ceph/ /var/lib/ceph/*\n"
  },
  {
    "path": "ceph-mon/templates/bin/_post-apply.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nexport LC_ALL=C\n\n: \"${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}\"\n\nif [[ ! -f /etc/ceph/${CLUSTER}.conf ]]; then\n  echo \"ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon\"\n  exit 1\nfi\n\nif [[ ! -f ${ADMIN_KEYRING} ]]; then\n   echo \"ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon\"\n   exit 1\nfi\n\nceph --cluster ${CLUSTER}  -s\nfunction wait_for_pods() {\n  timeout=${2:-1800}\n  end=$(date -ud \"${timeout} seconds\" +%s)\n  # Selecting containers with \"ceph-mon\" name and\n  # counting them based on \"ready\" field.\n  count_pods=\".items | map(.status.containerStatuses | .[] | \\\n              select(.name==\\\"ceph-mon\\\")) | \\\n              group_by(.ready) | map({(.[0].ready | tostring): length}) | .[]\"\n  min_mons=\"add | if .true >= (.false + .true) \\\n           then \\\"pass\\\" else \\\"fail\\\" end\"\n  while true; do\n      # Leave while loop if all mons are ready.\n      state=$(kubectl get pods --namespace=\"${1}\" -l component=mon -o json | jq \"${count_pods}\")\n      mon_state=$(jq -s \"${min_mons}\" <<< \"${state}\")\n      if [[ \"${mon_state}\" == \\\"pass\\\" ]]; then\n        break\n      fi\n      sleep 5\n\n      if [ $(date -u +%s) -gt $end ] ; then\n          echo -e \"Containers failed to start after $timeout seconds\\n\"\n          kubectl get pods --namespace \"${1}\" -o wide -l 
component=mon\n          exit 1\n      fi\n  done\n}\n\nfunction check_ds() {\n for ds in `kubectl get ds --namespace=$CEPH_NAMESPACE -l component=mon --no-headers=true|awk '{print $1}'`\n do\n   ds_query=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status`\n   if echo $ds_query |grep -i \"numberAvailable\" ;then\n     currentNumberScheduled=`echo $ds_query|jq -r .currentNumberScheduled`\n     desiredNumberScheduled=`echo $ds_query|jq -r .desiredNumberScheduled`\n     numberAvailable=`echo $ds_query|jq -r .numberAvailable`\n     numberReady=`echo $ds_query|jq -r .numberReady`\n     updatedNumberScheduled=`echo $ds_query|jq -r .updatedNumberScheduled`\n     ds_check=`echo \"$currentNumberScheduled $desiredNumberScheduled $numberAvailable $numberReady $updatedNumberScheduled\"| \\\n       tr ' ' '\\n'|sort -u|wc -l`\n     if [ $ds_check != 1 ]; then\n       echo \"Some pods in daemonset $ds are not ready\"\n       exit\n     else\n       echo \"All pods in deamonset $ds are ready\"\n     fi\n   else\n     echo \"There are no mons under daemonset $ds\"\n   fi\n done\n}\n\nfunction restart_mons() {\n  mon_pods=`kubectl get po -n $CEPH_NAMESPACE -l component=mon --no-headers | awk '{print $1}'`\n\n  for pod in ${mon_pods}\n  do\n    if [[ -n \"$pod\" ]]; then\n      echo \"Restarting pod $pod\"\n      kubectl delete pod -n $CEPH_NAMESPACE $pod\n    fi\n    echo \"Waiting for the pod $pod to restart\"\n    # The pod will not be ready in first 60 seconds. 
Thus we can reduce\n    # amount of queries to kubernetes.\n    sleep 60\n    wait_for_pods\n    ceph -s\n  done\n}\n\nwait_for_pods $CEPH_NAMESPACE\n\nrequire_upgrade=0\nmax_release=0\n\nfor ds in `kubectl get ds --namespace=$CEPH_NAMESPACE -l component=mon --no-headers=true|awk '{print $1}'`\ndo\n  updatedNumberScheduled=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status.updatedNumberScheduled`\n  desiredNumberScheduled=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status.desiredNumberScheduled`\n  if [[ $updatedNumberScheduled != $desiredNumberScheduled ]]; then\n    if kubectl get ds -n $CEPH_NAMESPACE  $ds -o json|jq -r .status|grep -i \"numberAvailable\" ;then\n      require_upgrade=$((require_upgrade+1))\n      _release=`kubectl get ds -n $CEPH_NAMESPACE $ds  -o json|jq -r .status.observedGeneration`\n      max_release=$(( max_release > _release ? max_release : _release ))\n    fi\n  fi\ndone\n\necho \"Latest revision of the helm chart(s) is : $max_release\"\n\nif [[ \"$UNCONDITIONAL_MON_RESTART\" == \"true\" ]] || [[ $max_release -gt 1  ]]; then\n  if [[ \"$UNCONDITIONAL_MON_RESTART\" == \"true\" ]] || [[  $require_upgrade -gt 0 ]]; then\n    echo \"Restart ceph-mon pods one at a time to prevent disruption\"\n    restart_mons\n  fi\n\n  # Check all the ceph-mon daemonsets\n  echo \"checking DS\"\n  check_ds\nelse\n  echo \"No revisions found for upgrade\"\nfi\n"
  },
  {
    "path": "ceph-mon/templates/bin/keys/_bootstrap-keyring-generator.py.tpl",
    "content": "#!/bin/python\nimport os\nimport struct\nimport time\nimport base64\nkey = os.urandom(16)\nheader = struct.pack(\n    '<hiih',\n    1,                 # le16 type: CEPH_CRYPTO_AES\n    int(time.time()),  # le32 created: seconds\n    0,                 # le32 created: nanoseconds,\n    len(key),          # le16: len(key)\n)\nprint(base64.b64encode(header + key).decode('ascii'))\n"
  },
  {
    "path": "ceph-mon/templates/bin/keys/_bootstrap-keyring-manager.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{ if .Release.IsInstall }}\n{{- $envAll := . }}\n\nfunction ceph_gen_key () {\n  python3 ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py\n}\n\nfunction kube_ceph_keyring_gen () {\n  CEPH_KEY=$1\n  CEPH_KEY_TEMPLATE=$2\n  sed \"s|{{\"{{\"}} key {{\"}}\"}}|${CEPH_KEY}|\" ${CEPH_TEMPLATES_DIR}/${CEPH_KEY_TEMPLATE} | base64 -w0 | tr -d '\\n'\n}\n\nfunction create_kube_key () {\n  CEPH_KEYRING=$1\n  CEPH_KEYRING_NAME=$2\n  CEPH_KEYRING_TEMPLATE=$3\n  KUBE_SECRET_NAME=$4\n  if ! kubectl get --namespace ${DEPLOYMENT_NAMESPACE} secrets ${KUBE_SECRET_NAME}; then\n    {\n      cat <<EOF\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: ${KUBE_SECRET_NAME}\n  labels:\n{{ tuple $envAll \"ceph\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\ntype: Opaque\ndata:\n  ${CEPH_KEYRING_NAME}: $( kube_ceph_keyring_gen ${CEPH_KEYRING} ${CEPH_KEYRING_TEMPLATE} )\nEOF\n    } | kubectl apply --namespace ${DEPLOYMENT_NAMESPACE} -f -\n  fi\n}\n\n#create_kube_key <ceph_key> <ceph_keyring_name> <ceph_keyring_template> <kube_secret_name>\ncreate_kube_key $(ceph_gen_key) ${CEPH_KEYRING_NAME} ${CEPH_KEYRING_TEMPLATE} ${KUBE_SECRET_NAME}\n\n{{ else }}\n\necho \"Not touching ${KUBE_SECRET_NAME} as this is not the initial deployment\"\n\n{{- end -}}\n"
  },
  {
    "path": "ceph-mon/templates/bin/keys/_storage-keyring-manager.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ if .Release.IsInstall }}\n{{- $envAll := . }}\n\nfunction ceph_gen_key () {\n  python3 ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py\n}\n\nfunction kube_ceph_keyring_gen () {\n  CEPH_KEY=$1\n  CEPH_KEY_TEMPLATE=$2\n  sed \"s|{{\"{{\"}} key {{\"}}\"}}|${CEPH_KEY}|\" ${CEPH_TEMPLATES_DIR}/${CEPH_KEY_TEMPLATE} | base64 -w0 | tr -d '\\n'\n}\n\nCEPH_CLIENT_KEY=\"\"\nROOK_CEPH_TOOLS_POD=$(kubectl -n ${DEPLOYMENT_NAMESPACE} get pods --no-headers | awk '/rook-ceph-tools/{print $1}')\n\nif [[ -n \"${ROOK_CEPH_TOOLS_POD}\" ]]; then\n  CEPH_AUTH_KEY_NAME=$(echo \"${CEPH_KEYRING_NAME}\" | awk -F. '{print $2 \".\" $3}')\n  CEPH_CLIENT_KEY=$(kubectl -n ${DEPLOYMENT_NAMESPACE} exec ${ROOK_CEPH_TOOLS_POD} -- ceph auth ls | grep -A1 \"${CEPH_AUTH_KEY_NAME}\" | awk '/key:/{print $2}')\nfi\n\nif [[ -z \"${CEPH_CLIENT_KEY}\" ]]; then\n  CEPH_CLIENT_KEY=$(ceph_gen_key)\nfi\n\nfunction create_kube_key () {\n  CEPH_KEYRING=$1\n  CEPH_KEYRING_NAME=$2\n  CEPH_KEYRING_TEMPLATE=$3\n  KUBE_SECRET_NAME=$4\n\n  if ! 
kubectl get --namespace ${DEPLOYMENT_NAMESPACE} secrets ${KUBE_SECRET_NAME}; then\n    {\n      cat <<EOF\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: ${KUBE_SECRET_NAME}\n  labels:\n{{ tuple $envAll \"ceph\" \"admin\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\ntype: Opaque\ndata:\n  ${CEPH_KEYRING_NAME}: $( kube_ceph_keyring_gen ${CEPH_KEYRING} ${CEPH_KEYRING_TEMPLATE} )\nEOF\n    } | kubectl apply --namespace ${DEPLOYMENT_NAMESPACE} -f -\n  fi\n}\n#create_kube_key <ceph_key> <ceph_keyring_name> <ceph_keyring_template> <kube_secret_name>\ncreate_kube_key ${CEPH_CLIENT_KEY} ${CEPH_KEYRING_NAME} ${CEPH_KEYRING_TEMPLATE} ${CEPH_KEYRING_ADMIN_NAME}\n\nfunction create_kube_storage_key () {\n  CEPH_KEYRING=$1\n  KUBE_SECRET_NAME=$2\n\n  if ! kubectl get --namespace ${DEPLOYMENT_NAMESPACE} secrets ${KUBE_SECRET_NAME}; then\n    {\n      cat <<EOF\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: ${KUBE_SECRET_NAME}\n  labels:\n{{ tuple $envAll \"ceph\" \"admin\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\ntype: kubernetes.io/rbd\ndata:\n  key: $( echo ${CEPH_KEYRING} | base64 | tr -d '\\n' )\n  userID: $( echo -n \"admin\" | base64 | tr -d '\\n' )\n  userKey: $( echo -n ${CEPH_KEYRING} | base64 | tr -d '\\n' )\nEOF\n    } | kubectl apply --namespace ${DEPLOYMENT_NAMESPACE} -f -\n  fi\n}\n#create_kube_storage_key <ceph_key> <kube_secret_name>\ncreate_kube_storage_key ${CEPH_CLIENT_KEY} ${CEPH_STORAGECLASS_ADMIN_SECRET_NAME}\ncreate_kube_storage_key ${CEPH_CLIENT_KEY} ${CEPH_STORAGECLASS_ADMIN_SECRET_NAME_NODE}\n\n{{ else }}\n\necho \"Not touching ${KUBE_SECRET_NAME} as this is not the initial deployment\"\n\n{{ end }}\n"
  },
  {
    "path": "ceph-mon/templates/bin/mgr/_check.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport LC_ALL=C\n\nCOMMAND=\"${@:-liveness}\"\n\nfunction heath_check () {\n   ASOK=$(ls /var/run/ceph/${CLUSTER}-mgr*)\n   MGR_NAME=$(basename ${ASOK} | sed -e 's/.asok//' | cut -f 1 -d '.' --complement)\n   MGR_STATE=$(ceph --cluster ${CLUSTER} --connect-timeout 1 daemon mgr.${MGR_NAME} status)\n   if [ $? = 0 ]; then\n     exit 0\n   else\n     echo $MGR_STATE\n     exit 1\n   fi\n}\n\nfunction liveness () {\n  heath_check\n}\n\nfunction readiness () {\n  heath_check\n}\n\n$COMMAND\n"
  },
  {
    "path": "ceph-mon/templates/bin/mgr/_start.sh.tpl",
    "content": "#!/bin/bash\nset -ex\n: \"${CEPH_GET_ADMIN_KEY:=0}\"\n: \"${MGR_NAME:=$(uname -n)}\"\n: \"${MGR_KEYRING:=/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}/keyring}\"\n: \"${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}\"\n: \"${CEPH_CONF:=\"/etc/ceph/${CLUSTER}.conf\"}\"\n\n{{ include \"helm-toolkit.snippets.mon_host_from_k8s_ep\" . }}\n\nif [[ ! -e ${CEPH_CONF}.template ]]; then\n  echo \"ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon\"\n  exit 1\nelse\n  ENDPOINT=$(mon_host_from_k8s_ep \"${NAMESPACE}\" ceph-mon-discovery)\n  if [[ \"${ENDPOINT}\" == \"\" ]]; then\n    /bin/sh -c -e \"cat ${CEPH_CONF}.template | tee ${CEPH_CONF}\" || true\n  else\n    /bin/sh -c -e \"cat ${CEPH_CONF}.template | sed 's#mon_host.*#mon_host = ${ENDPOINT}#g' | tee ${CEPH_CONF}\" || true\n  fi\nfi\n\nif [ ${CEPH_GET_ADMIN_KEY} -eq 1 ]; then\n    if [[ ! -e ${ADMIN_KEYRING} ]]; then\n        echo \"ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon\"\n        exit 1\n    fi\nfi\n\n# Create a MGR keyring\nrm -rf $MGR_KEYRING\nif [ ! -e \"$MGR_KEYRING\" ]; then\n    # Create ceph-mgr key\n    timeout 10 ceph --cluster \"${CLUSTER}\" auth get-or-create mgr.\"${MGR_NAME}\" mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o \"$MGR_KEYRING\"\n    chown --verbose ceph. \"$MGR_KEYRING\"\n    chmod 600 \"$MGR_KEYRING\"\nfi\n\necho \"SUCCESS\"\n\nceph --cluster \"${CLUSTER}\" -v\n\n# Env. 
variables matching the pattern \"<module>_\" will be\n# found and parsed for config-key settings by\n#  ceph config set mgr mgr/<module>/<key> <value>\nMODULES_TO_DISABLE=`ceph mgr dump | python3 -c \"import json, sys; print(' '.join(json.load(sys.stdin)['modules']))\"`\n\nfor module in ${ENABLED_MODULES}; do\n    # This module may have been enabled in the past\n    # remove it from the disable list if present\n    MODULES_TO_DISABLE=${MODULES_TO_DISABLE/$module/}\n\n    options=`env | grep ^${module}_ || true`\n    for option in ${options}; do\n        #strip module name\n        option=${option/${module}_/}\n        key=`echo $option | cut -d= -f1`\n        value=`echo $option | cut -d= -f2`\n        if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then\n          ceph --cluster \"${CLUSTER}\" config set mgr mgr/$module/$key $value --force\n        else\n          ceph --cluster \"${CLUSTER}\" config set mgr mgr/$module/$key $value\n        fi\n    done\n    ceph --cluster \"${CLUSTER}\" mgr module enable ${module} --force\ndone\n\nfor module in $MODULES_TO_DISABLE; do\n  ceph --cluster \"${CLUSTER}\" mgr module disable ${module}\ndone\n\necho \"SUCCESS\"\n# start ceph-mgr\nexec /usr/bin/ceph-mgr \\\n  --cluster \"${CLUSTER}\" \\\n  --setuser \"ceph\" \\\n  --setgroup \"ceph\" \\\n  -d \\\n  -i \"${MGR_NAME}\"\n"
  },
  {
    "path": "ceph-mon/templates/bin/mon/_check.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-liveness}\"\n: ${K8S_HOST_NETWORK:=0}\n\nfunction heath_check () {\n  SOCKDIR=${CEPH_SOCKET_DIR:-/run/ceph}\n  SBASE=${CEPH_OSD_SOCKET_BASE:-ceph-mon}\n  SSUFFIX=${CEPH_SOCKET_SUFFIX:-asok}\n\n  MON_ID=$(ps auwwx | grep ceph-mon | grep -v \"$1\" | grep -v grep | sed 's/.*-i\\ //;s/\\ .*//'|awk '{print $1}')\n\n  if [ -z \"${MON_ID}\" ]; then\n    if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then\n        MON_NAME=${POD_NAME}\n    else\n        MON_NAME=${NODE_NAME}\n    fi\n  fi\n\n  if [ -S \"${SOCKDIR}/${SBASE}.${MON_NAME}.${SSUFFIX}\" ]; then\n   MON_STATE=$(ceph -f json-pretty --connect-timeout 1 --admin-daemon \"${SOCKDIR}/${SBASE}.${MON_NAME}.${SSUFFIX}\" mon_status|grep state|sed 's/.*://;s/[^a-z]//g')\n   echo \"MON ${MON_ID} ${MON_STATE}\";\n   # this might be a stricter check than we actually want.  what are the\n   # other values for the \"state\" field?\n   for S in ${MON_LIVE_STATE}; do\n    if [ \"x${MON_STATE}x\" = \"x${S}x\" ]; then\n     exit 0\n    fi\n   done\n  fi\n  # if we made it this far, things are not running\n  exit 1\n}\n\nfunction liveness () {\n  MON_LIVE_STATE=\"probing electing synchronizing leader peon\"\n  heath_check\n}\n\nfunction readiness () {\n  MON_LIVE_STATE=\"leader peon\"\n  heath_check\n}\n\n$COMMAND\n"
  },
  {
    "path": "ceph-mon/templates/bin/mon/_start.sh.tpl",
    "content": "#!/bin/bash\nset -ex\nexport LC_ALL=C\n: \"${K8S_HOST_NETWORK:=0}\"\n: \"${MON_KEYRING:=/etc/ceph/${CLUSTER}.mon.keyring}\"\n: \"${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}\"\n: \"${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}\"\n: \"${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}\"\n: \"${CEPH_CONF:=\"/etc/ceph/${CLUSTER}.conf\"}\"\n\n{{ include \"helm-toolkit.snippets.mon_host_from_k8s_ep\" . }}\n\nif [[ ! -e ${CEPH_CONF}.template ]]; then\n  echo \"ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon\"\n  exit 1\nelse\n\n  ENDPOINT=$(mon_host_from_k8s_ep \"${NAMESPACE}\" ceph-mon-discovery)\n\n  if [[ -z \"${ENDPOINT}\" ]]; then\n    /bin/sh -c -e \"cat ${CEPH_CONF}.template | tee ${CEPH_CONF}\" || true\n  else\n    /bin/sh -c -e \"cat ${CEPH_CONF}.template | sed 's#mon_host.*#mon_host = ${ENDPOINT}#g' | tee ${CEPH_CONF}\" || true\n  fi\nfi\n\nif [[ -z \"$CEPH_PUBLIC_NETWORK\" ]]; then\n  echo \"ERROR- CEPH_PUBLIC_NETWORK must be defined as the name of the network for the OSDs\"\n  exit 1\nfi\n\nif [[ -z \"$MON_IP\" ]]; then\n  echo \"ERROR- MON_IP must be defined as the IP address of the monitor\"\n  exit 1\nfi\n\nif [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then\n    MON_NAME=${POD_NAME}\nelse\n    MON_NAME=${NODE_NAME}\nfi\nMON_DATA_DIR=\"/var/lib/ceph/mon/${CLUSTER}-${MON_NAME}\"\nMONMAP=\"/etc/ceph/monmap-${CLUSTER}\"\n\n# Make the monitor directory\n/bin/sh -c \"mkdir -p \\\"${MON_DATA_DIR}\\\"\"\n\nfunction get_mon_config {\n  # Get fsid from ceph.conf\n  local fsid=$(ceph-conf --lookup fsid -c /etc/ceph/${CLUSTER}.conf)\n\n  timeout=10\n  MONMAP_ADD=\"\"\n\n  while [[ -z \"${MONMAP_ADD// }\" && \"${timeout}\" -gt 0 ]]; do\n    # Get the ceph mon pods (name and IP) from the Kubernetes API. 
Formatted as a set of monmap params\n    if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then\n        MONMAP_ADD=$(kubectl get pods --namespace=${NAMESPACE} ${KUBECTL_PARAM} -o template --template=\"{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--addv {{`{{.metadata.name}}`}} [v1:{{`{{.status.podIP}}`}}:${MON_PORT},v2:{{`{{.status.podIP}}`}}:${MON_PORT_V2}] {{`{{end}}`}} {{`{{end}}`}}\")\n    else\n        MONMAP_ADD=$(kubectl get pods --namespace=${NAMESPACE} ${KUBECTL_PARAM} -o template --template=\"{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--addv {{`{{.spec.nodeName}}`}} [v1:{{`{{.status.podIP}}`}}:${MON_PORT},v2:{{`{{.status.podIP}}`}}:${MON_PORT_V2}] {{`{{end}}`}} {{`{{end}}`}}\")\n    fi\n    (( timeout-- ))\n    sleep 1\n  done\n\n  if [[ -z \"${MONMAP_ADD// }\" ]]; then\n      exit 1\n  fi\n\n  # Create a monmap with the Pod Names and IP\n  monmaptool --create ${MONMAP_ADD} --fsid ${fsid} ${MONMAP} --clobber\n}\n\nget_mon_config\n\n# If we don't have a monitor keyring, this is a new monitor\nif [ ! -e \"${MON_DATA_DIR}/keyring\" ]; then\n  if [ ! -e ${MON_KEYRING}.seed ]; then\n    echo \"ERROR- ${MON_KEYRING}.seed must exist. You can extract it from your current monitor by running 'ceph auth get mon. -o ${MON_KEYRING}' or use a KV Store\"\n    exit 1\n  else\n    cp -vf ${MON_KEYRING}.seed ${MON_KEYRING}\n  fi\n\n  if [ ! -e ${MONMAP} ]; then\n    echo \"ERROR- ${MONMAP} must exist. 
You can extract it from your current monitor by running 'ceph mon getmap -o ${MONMAP}' or use a KV Store\"\n    exit 1\n  fi\n\n  # Testing if it's not the first monitor, if one key doesn't exist we assume none of them exist\n  for KEYRING in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING} ${ADMIN_KEYRING}; do\n    ceph-authtool ${MON_KEYRING} --import-keyring ${KEYRING}\n  done\n\n  # Prepare the monitor daemon's directory with the map and keyring\n  ceph-mon --setuser ceph --setgroup ceph --cluster \"${CLUSTER}\" --mkfs -i ${MON_NAME} --inject-monmap ${MONMAP} --keyring ${MON_KEYRING} --mon-data \"${MON_DATA_DIR}\"\nelse\n  echo \"Trying to get the most recent monmap...\"\n  # Ignore when we timeout, in most cases that means the cluster has no quorum or\n  # no mons are up and running yet\n  timeout 5 ceph --cluster \"${CLUSTER}\" mon getmap -o ${MONMAP} || true\n  ceph-mon --setuser ceph --setgroup ceph --cluster \"${CLUSTER}\" -i ${MON_NAME} --inject-monmap ${MONMAP} --keyring ${MON_KEYRING} --mon-data \"${MON_DATA_DIR}\"\n  timeout 7 ceph --cluster \"${CLUSTER}\" mon add \"${MON_NAME}\" \"${MON_IP}:${MON_PORT_V2}\" || true\nfi\n\n# start MON\nexec /usr/bin/ceph-mon \\\n  --cluster \"${CLUSTER}\" \\\n  --setuser \"ceph\" \\\n  --setgroup \"ceph\" \\\n  -d \\\n  -i ${MON_NAME} \\\n  --mon-data \"${MON_DATA_DIR}\" \\\n  --public-addr \"${MON_IP}:${MON_PORT_V2}\"\n"
  },
  {
    "path": "ceph-mon/templates/bin/mon/_stop.sh.tpl",
    "content": "#!/bin/bash\n\nset -ex\n\nNUMBER_OF_MONS=$(ceph mon stat | awk '$3 == \"mons\" {print $2}')\nif [[ \"${NUMBER_OF_MONS}\" -gt \"3\" ]]; then\n  if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then\n      ceph mon remove \"${POD_NAME}\"\n  else\n      ceph mon remove \"${NODE_NAME}\"\n  fi\nelse\n  echo \"doing nothing since we are running less than or equal to 3 mons\"\nfi\n"
  },
  {
    "path": "ceph-mon/templates/bin/moncheck/_reap-zombies.py.tpl",
    "content": "#!/usr/bin/python\nimport re\nimport os\nimport subprocess  # nosec\nimport json\n\nMON_REGEX = r\"^\\d: \\[((v\\d+:([0-9\\.]*):\\d+\\/\\d+,*)+)] mon.([^ ]*)$\"\n# kubctl_command = 'kubectl get pods --namespace=${NAMESPACE} -l component=mon,application=ceph -o template --template=\"{ {{\"}}\"}}range .items{{\"}}\"}} \\\\\"{{\"}}\"}}.metadata.name{{\"}}\"}}\\\\\": \\\\\"{{\"}}\"}}.status.podIP{{\"}}\"}}\\\\\" ,   {{\"}}\"}}end{{\"}}\"}} }\"'\nif int(os.getenv('K8S_HOST_NETWORK', 0)) > 0:\n    kubectl_command = 'kubectl get pods --namespace=${NAMESPACE} -l component=mon,application=ceph -o template --template=\"{ {{\"{{\"}}range  \\$i, \\$v  := .items{{\"}}\"}} {{\"{{\"}} if \\$i{{\"}}\"}} , {{\"{{\"}} end {{\"}}\"}} \\\\\"{{\"{{\"}}\\$v.spec.nodeName{{\"}}\"}}\\\\\": \\\\\"{{\"{{\"}}\\$v.status.podIP{{\"}}\"}}\\\\\" {{\"{{\"}}end{{\"}}\"}} }\"'\nelse:\n    kubectl_command = 'kubectl get pods --namespace=${NAMESPACE} -l component=mon,application=ceph -o template --template=\"{ {{\"{{\"}}range  \\$i, \\$v  := .items{{\"}}\"}} {{\"{{\"}} if \\$i{{\"}}\"}} , {{\"{{\"}} end {{\"}}\"}} \\\\\"{{\"{{\"}}\\$v.metadata.name{{\"}}\"}}\\\\\": \\\\\"{{\"{{\"}}\\$v.status.podIP{{\"}}\"}}\\\\\" {{\"{{\"}}end{{\"}}\"}} }\"'\n\nmonmap_command = \"ceph --cluster=${CLUSTER} mon getmap > /tmp/monmap && monmaptool -f /tmp/monmap --print\"\n\n\ndef extract_mons_from_monmap():\n    monmap = subprocess.check_output(monmap_command, shell=True).decode('utf-8')  # nosec\n    mons = {}\n    for line in monmap.split(\"\\n\"):\n        m = re.match(MON_REGEX, line)\n        if m is not None:\n            mons[m.group(4)] = m.group(3)\n    return mons\n\ndef extract_mons_from_kubeapi():\n    kubemap = subprocess.check_output(kubectl_command, shell=True).decode('utf-8')  # nosec\n    return json.loads(kubemap)\n\ncurrent_mons = extract_mons_from_monmap()\nexpected_mons = extract_mons_from_kubeapi()\n\nprint(\"current mons: %s\" % current_mons)\nprint(\"expected mons: %s\" % 
expected_mons)\n\nremoved_mon = False\nfor mon in current_mons:\n    if not mon in expected_mons:\n        print(\"removing zombie mon %s\" % mon)\n        subprocess.call([\"ceph\", \"--cluster\", os.environ[\"NAMESPACE\"], \"mon\", \"remove\", mon])  # nosec\n        removed_mon = True\n    elif current_mons[mon] != expected_mons[mon]: # check if for some reason the ip of the mon changed\n        print(\"ip change detected for pod %s\" % mon)\n        subprocess.call([\"kubectl\", \"--namespace\", os.environ[\"NAMESPACE\"], \"delete\", \"pod\", mon])  # nosec\n        removed_mon = True\n        print(\"deleted mon %s via the kubernetes api\" % mon)\n\n\nif not removed_mon:\n    print(\"no zombie mons found ...\")\n"
  },
  {
    "path": "ceph-mon/templates/bin/moncheck/_start.sh.tpl",
    "content": "#!/bin/bash\nset -ex\nexport LC_ALL=C\n: \"${CEPH_CONF:=\"/etc/ceph/${CLUSTER}.conf\"}\"\n\n{{ include \"helm-toolkit.snippets.mon_host_from_k8s_ep\" . }}\n\nif [[ ! -e ${CEPH_CONF}.template ]]; then\n  echo \"ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon\"\n  exit 1\nelse\n  ENDPOINT=$(mon_host_from_k8s_ep ${NAMESPACE} ceph-mon-discovery)\n  if [[ \"${ENDPOINT}\" == \"\" ]]; then\n    /bin/sh -c -e \"cat ${CEPH_CONF}.template | tee ${CEPH_CONF}\" || true\n  else\n    /bin/sh -c -e \"cat ${CEPH_CONF}.template | sed 's#mon_host.*#mon_host = ${ENDPOINT}#g' | tee ${CEPH_CONF}\" || true\n  fi\nfi\n\nfunction check_mon_msgr2 {\n if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then\n   if ceph health detail|grep -i \"MON_MSGR2_NOT_ENABLED\"; then\n     echo \"ceph-mon msgr v2 not enabled on all ceph mons so enabling\"\n     ceph mon enable-msgr2\n   fi\n fi\n}\n\nfunction get_mon_count {\n  ceph mon count-metadata hostname | jq '. 
| length'\n}\n\nfunction check_mon_addrs {\n  local mon_dump=$(ceph mon dump)\n  local mon_hostnames=$(echo \"${mon_dump}\" | awk '/mon\\./{print $3}' | sed 's/mon\\.//g')\n  local mon_endpoints=$(kubectl get endpoints ceph-mon-discovery -n ${NAMESPACE} -o json)\n  local v1_port=$(jq '.subsets[0].ports[] | select(.name == \"mon\") | .port' <<< ${mon_endpoints})\n  local v2_port=$(jq '.subsets[0].ports[] | select(.name == \"mon-msgr2\") | .port' <<< ${mon_endpoints})\n\n  for mon in ${mon_hostnames}; do\n    local mon_endpoint=$(echo \"${mon_dump}\" | awk \"/${mon}/{print \\$2}\")\n    local mon_ip=$(jq -r \".subsets[0].addresses[] | select(.nodeName == \\\"${mon}\\\") | .ip\" <<< ${mon_endpoints})\n\n    # Skip this mon if it doesn't appear in the list of kubernetes endpoints\n    if [[ -n \"${mon_ip}\" ]]; then\n      local desired_endpoint=$(printf '[v1:%s:%s/0,v2:%s:%s/0]' ${mon_ip} ${v1_port} ${mon_ip} ${v2_port})\n\n      if [[ \"${mon_endpoint}\" != \"${desired_endpoint}\" ]]; then\n        echo \"endpoint for ${mon} is ${mon_endpoint}, setting it to ${desired_endpoint}\"\n        ceph mon set-addrs ${mon} ${desired_endpoint}\n      fi\n    fi\n  done\n}\n\nfunction watch_mon_health {\n  previous_mon_count=$(get_mon_count)\n  while [ true ]; do\n    mon_count=$(get_mon_count)\n    if [[ ${mon_count} -ne ${previous_mon_count} ]]; then\n      echo \"checking for zombie mons\"\n      python3 /tmp/moncheck-reap-zombies.py || true\n    fi\n    previous_mon_count=${mon_count}\n    echo \"checking for ceph-mon msgr v2\"\n    check_mon_msgr2\n    echo \"checking mon endpoints in monmap\"\n    check_mon_addrs\n    echo \"sleep 30 sec\"\n    sleep 30\n  done\n}\n\nwatch_mon_health\n"
  },
  {
    "path": "ceph-mon/templates/bin/utils/_checkDNS.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n: \"${CEPH_CONF:=\"/etc/ceph/${CLUSTER}.conf\"}\"\nENDPOINT=\"{$1}\"\n\nfunction check_mon_dns () {\n  GREP_CMD=$(grep -rl 'ceph-mon' ${CEPH_CONF})\n\n  if [[ \"${ENDPOINT}\" == \"{up}\" ]]; then\n    echo \"If DNS is working, we are good here\"\n  elif [[ \"${ENDPOINT}\" != \"\" ]]; then\n    if [[ ${GREP_CMD} != \"\" ]]; then\n      # No DNS, write CEPH MONs IPs into ${CEPH_CONF}\n      sh -c -e \"cat ${CEPH_CONF}.template | sed 's/mon_host.*/mon_host = ${ENDPOINT}/g' | tee ${CEPH_CONF}\" > /dev/null 2>&1\n    else\n      echo \"endpoints are already cached in ${CEPH_CONF}\"\n      exit\n    fi\n  fi\n}\n\ncheck_mon_dns\n\nexit\n"
  },
  {
    "path": "ceph-mon/templates/bin/utils/_checkObjectReplication.py.tpl",
    "content": "#!/usr/bin/python3\n\nimport subprocess  # nosec\nimport json\nimport sys\nimport collections\n\nif (int(len(sys.argv)) == 1):\n    print(\"Please provide pool name to test , example: checkObjectReplication.py  <pool name>\")\n    sys.exit(1)\nelse:\n    poolName = sys.argv[1]\n    cmdRep = 'ceph osd map' + ' ' + str(poolName) + ' ' +  'testreplication -f json-pretty'\n    objectRep  = subprocess.check_output(cmdRep, shell=True)  # nosec\n    repOut = json.loads(objectRep)\n    osdNumbers = repOut['up']\n    print(\"Test object got replicated on these osds: %s\" % str(osdNumbers))\n\n    osdHosts= []\n    for osd in osdNumbers:\n        cmdFind = 'ceph osd find' +  ' ' + str(osd)\n        osdFind = subprocess.check_output(cmdFind , shell=True)  # nosec\n        osdHost = json.loads(osdFind)\n        osdHostLocation = osdHost['crush_location']\n        osdHosts.append(osdHostLocation['host'])\n\n    print(\"Test object got replicated on these hosts: %s\" % str(osdHosts))\n\n    print(\"Hosts hosting multiple copies of a placement groups are: %s\" %\n          str([item for item, count in collections.Counter(osdHosts).items() if count > 1]))\n    sys.exit(0)\n"
  },
  {
    "path": "ceph-mon/templates/bin/utils/_checkPGs.py.tpl",
    "content": "#!/usr/bin/python\n\nimport subprocess  # nosec\nimport json\nimport sys\nfrom argparse import *\n\nclass cephCRUSH():\n    \"\"\"\n    Currently, this script is coded to work with the ceph clusters that have\n    these type-ids -- osd, host, rack, root.  To add other type_ids to the\n    CRUSH map, this script needs enhancements to include the new type_ids.\n\n    type_id name\n    ------- ----\n          0 osd\n          1 host\n          2 chassis\n          3 rack\n          4 row\n          5 pdu\n          6 pod\n          7 room\n          8 datacenter\n          9 region\n         10 root\n\n    Ceph organizes the CRUSH map in hierarchical topology.  At the top, it is\n    the root.  The next levels are racks, hosts, and OSDs, respectively.  The\n    OSDs are at the leaf level.  This script looks at OSDs in each placement\n    group of a ceph pool.  For each OSD, starting from the OSD leaf level, this\n    script traverses up to the root.  Along the way, the host and rack are\n    recorded and then verified to make sure the paths to the root are in\n    separate failure domains.  
This script reports the offending PGs to stdout.\n    \"\"\"\n\n    \"\"\"\n    This list stores the ceph crush hierarchy retrieved from the\n    ceph osd crush tree -f json-pretty\n    \"\"\"\n    crushHierarchy = []\n\n    \"\"\"\n    Failure Domains - currently our crush map uses these type IDs - osd,\n    host, rack, root\n    If we need to add chassis type (or other types) later on, add the\n    type to the if statement in the crushFD construction section.\n\n    crushFD[0] = {'id': -2, 'name': 'host1', 'type': 'host'}\n    crushFD[23] = {'id': -5, 'name': 'host2', 'type': 'host'}\n    crushFD[68] = {'id': -7, 'name': 'host3', 'type': 'host'}\n    rack_FD[-2] = {'id': -9, 'name': 'rack1', 'type': 'rack' }\n    rack_FD[-15] = {'id': -17, 'name': 'rack2', 'type': 'rack' }\n    root_FD[-17] = {'id': -1, 'name': 'default', 'type': 'root' }}\n    root_FD[-9] = {'id': -1, 'name': 'default', 'type': 'root' }}\n    \"\"\"\n    crushFD = {}\n\n    def __init__(self, poolName):\n        if 'all' in poolName or 'All' in poolName:\n            try:\n                poolLs = 'ceph osd pool ls -f json-pretty'\n                poolstr = subprocess.check_output(poolLs, shell=True)  # nosec\n                self.listPoolName = json.loads(poolstr)\n            except subprocess.CalledProcessError as e:\n                print('{}'.format(e))\n                \"\"\"Unable to get all pools - cannot proceed\"\"\"\n                sys.exit(2)\n        else:\n            self.listPoolName = poolName\n\n        try:\n            \"\"\"Retrieve the crush hierarchies\"\"\"\n            crushTree = \"ceph osd crush tree -f json-pretty | jq .nodes\"\n            chstr = subprocess.check_output(crushTree, shell=True)  # nosec\n            self.crushHierarchy = json.loads(chstr)\n        except subprocess.CalledProcessError as e:\n            print('{}'.format(e))\n            \"\"\"Unable to get crush hierarchy - cannot proceed\"\"\"\n            sys.exit(2)\n\n        \"\"\"\n        
Number of racks configured in the ceph cluster.  The racks that are\n        present in the crush hierarchy may not be used.  The un-used rack\n        would not show up in the crushFD.\n        \"\"\"\n        self.count_racks = 0\n\n        \"\"\"depth level - 3 is OSD, 2 is host, 1 is rack, 0 is root\"\"\"\n        self.osd_depth = 0\n        \"\"\"Construct the Failure Domains - OSD -> Host -> Rack -> Root\"\"\"\n        for chitem in self.crushHierarchy:\n            if chitem['type'] == 'host' or \\\n               chitem['type'] == 'rack' or \\\n               chitem['type'] == 'root':\n                for child in chitem['children']:\n                    self.crushFD[child] = {'id': chitem['id'], 'name': chitem['name'], 'type': chitem['type']}\n                if chitem['type'] == 'rack' and len(chitem['children']) > 0:\n                    self.count_racks += 1\n            elif chitem['type'] == 'osd':\n                if self.osd_depth == 0:\n                    self.osd_depth = chitem['depth']\n\n        \"\"\"[ { 'pg-name' : [osd.1, osd.2, osd.3] } ... ]\"\"\"\n        self.poolPGs = []\n        \"\"\"Replica of the pool.  Initialize to 0.\"\"\"\n        self.poolSize = 0\n\n    def isSupportedRelease(self):\n        cephMajorVer = int(subprocess.check_output(\"ceph mon versions | awk '/version/{print $3}' | cut -d. 
-f1\", shell=True))  # nosec\n        return cephMajorVer >= 14\n\n    def getPoolSize(self, poolName):\n        \"\"\"\n        size (number of replica) is an attribute of a pool\n        { \"pool\": \"rbd\", \"pool_id\": 1, \"size\": 3 }\n        \"\"\"\n        pSize = {}\n        \"\"\"Get the size attribute of the poolName\"\"\"\n        try:\n            poolGet = 'ceph osd pool get ' + poolName + ' size -f json-pretty'\n            szstr = subprocess.check_output(poolGet, shell=True)  # nosec\n            pSize = json.loads(szstr)\n            self.poolSize = pSize['size']\n        except subprocess.CalledProcessError as e:\n            print('{}'.format(e))\n            self.poolSize = 0\n            \"\"\"Continue on\"\"\"\n        return\n\n    def checkPGs(self, poolName):\n        poolPGs = self.poolPGs['pg_stats'] if self.isSupportedRelease() else self.poolPGs\n        if not poolPGs:\n            return\n        print('Checking PGs in pool {} ...'.format(poolName)),\n        badPGs = False\n        for pg in poolPGs:\n            osdUp = pg['up']\n            \"\"\"\n            Construct the OSD path from the leaf to the root.  If the\n            replica is set to 3 and there are 3 racks.  Each OSD has its\n            own rack (failure domain).   If more than one OSD has the\n            same rack, this is a violation.  
If the number of rack is\n            one, then we need to make sure the hosts for the three OSDs\n            are different.\n            \"\"\"\n            check_FD = {}\n            checkFailed = False\n            for osd in osdUp:\n                traverseID = osd\n                \"\"\"Start the level with 1 to include the OSD leaf\"\"\"\n                traverseLevel = 1\n                while (self.crushFD[traverseID]['type'] != 'root'):\n                    crushType = self.crushFD[traverseID]['type']\n                    crushName = self.crushFD[traverseID]['name']\n                    if crushType in check_FD:\n                        check_FD[crushType].append(crushName)\n                    else:\n                        check_FD[crushType] = [crushName]\n                    \"\"\"traverse up (to the root) one level\"\"\"\n                    traverseID = self.crushFD[traverseID]['id']\n                    traverseLevel += 1\n                if not (traverseLevel == self.osd_depth):\n                    raise Exception(\"OSD depth mismatch\")\n            \"\"\"\n            check_FD should have\n            {\n             'host': ['host1', 'host2', 'host3', 'host4'],\n             'rack': ['rack1', 'rack2', 'rack3']\n            }\n            Not checking for the 'root' as there is only one root.\n            \"\"\"\n            for ktype in check_FD:\n                kvalue = check_FD[ktype]\n                if ktype == 'host':\n                    \"\"\"\n                    At the host level, every OSD should come from different\n                    host.  
It is a violation if duplicate hosts are found.\n                    \"\"\"\n                    if len(kvalue) != len(set(kvalue)):\n                        if not badPGs:\n                            print('Failed')\n                        badPGs = True\n                        print('OSDs {} in PG {} failed check in host {}'.format(pg['up'], pg['pgid'], kvalue))\n                elif ktype == 'rack':\n                    if len(kvalue) == len(set(kvalue)):\n                        continue\n                    else:\n                        \"\"\"\n                        There are duplicate racks.  This could be due to\n                        situation like pool's size is 3 and there are only\n                        two racks (or one rack).  OSDs should come from\n                        different hosts as verified in the 'host' section.\n                        \"\"\"\n                        if self.count_racks == len(set(kvalue)):\n                            continue\n                        elif self.count_racks > len(set(kvalue)):\n                            \"\"\"Not all the racks were used to allocate OSDs\"\"\"\n                            if not badPGs:\n                                print('Failed')\n                            badPGs = True\n                            print('OSDs {} in PG {} failed check in rack {}'.format(pg['up'], pg['pgid'], kvalue))\n            check_FD.clear()\n        if not badPGs:\n            print('Passed')\n        return\n\n    def checkPoolPGs(self):\n        for pool in self.listPoolName:\n            self.getPoolSize(pool)\n            if self.poolSize == 1:\n                \"\"\"No need to check pool with the size set to 1 copy\"\"\"\n                print('Checking PGs in pool {} ... 
{}'.format(pool, 'Skipped'))\n                continue\n            elif self.poolSize == 0:\n                print('Pool {} was not found.'.format(pool))\n                continue\n            if not self.poolSize > 1:\n                raise Exception(\"Pool size was incorrectly set\")\n\n            try:\n                \"\"\"Get the list of PGs in the pool\"\"\"\n                lsByPool = 'ceph pg ls-by-pool ' + pool + ' -f json-pretty'\n                pgstr = subprocess.check_output(lsByPool, shell=True)  # nosec\n                self.poolPGs = json.loads(pgstr)\n                \"\"\"Check that OSDs in the PG are in separate failure domains\"\"\"\n                self.checkPGs(pool)\n            except subprocess.CalledProcessError as e:\n                print('{}'.format(e))\n                \"\"\"Continue to the next pool (if any)\"\"\"\n        return\n\ndef Main():\n    parser = ArgumentParser(description='''\nCross-check the OSDs assigned to the Placement Groups (PGs) of a ceph pool\nwith the CRUSH topology.  The cross-check compares the OSDs in a PG and\nverifies the OSDs reside in separate failure domains.  PGs with OSDs in\nthe same failure domain are flagged as violation.  The offending PGs are\nprinted to stdout.\n\nThis CLI is executed on-demand on a ceph-mon pod.  To invoke the CLI, you\ncan specify one pool or list of pools to check.  The special pool name\nAll (or all) checks all the pools in the ceph cluster.\n''',\n    formatter_class=RawTextHelpFormatter)\n    parser.add_argument('PoolName', type=str, nargs='+',\n      help='List of pools (or All) to validate the PGs and OSDs mapping')\n    args = parser.parse_args()\n\n    if ('all' in args.PoolName or\n        'All' in args.PoolName) and len(args.PoolName) > 1:\n        print('You only need to give one pool with special pool All')\n        sys.exit(1)\n\n    \"\"\"\n    Retrieve the crush hierarchies and store it.  
Cross-check the OSDs\n    in each PG searching for failure domain violation.\n    \"\"\"\n    ccm = cephCRUSH(args.PoolName)\n    ccm.checkPoolPGs()\n\nif __name__ == '__main__':\n    Main()\n"
  },
  {
    "path": "ceph-mon/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.configmap_bin .Values.deployment.ceph }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  post-apply.sh: |\n{{ tuple \"bin/_post-apply.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n\n  init-dirs.sh: |\n{{ tuple \"bin/_init-dirs.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  keys-bootstrap-keyring-generator.py: |\n{{ tuple \"bin/keys/_bootstrap-keyring-generator.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  keys-bootstrap-keyring-manager.sh: |\n{{ tuple \"bin/keys/_bootstrap-keyring-manager.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  keys-storage-keyring-manager.sh: |\n{{ tuple \"bin/keys/_storage-keyring-manager.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  mon-start.sh: |\n{{ tuple \"bin/mon/_start.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  mon-stop.sh: |\n{{ tuple \"bin/mon/_stop.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  mon-check.sh: |\n{{ tuple \"bin/mon/_check.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  mgr-start.sh: |\n{{ tuple \"bin/mgr/_start.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  mgr-check.sh: |\n{{ tuple \"bin/mgr/_check.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  moncheck-start.sh: |\n{{ tuple \"bin/moncheck/_start.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  moncheck-reap-zombies.py: |\n{{ tuple \"bin/moncheck/_reap-zombies.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  utils-checkObjectReplication.py: |\n{{ tuple \"bin/utils/_checkObjectReplication.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  utils-checkDNS.sh: |\n{{ tuple \"bin/utils/_checkDNS.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  utils-checkPGs.py: |\n{{ tuple \"bin/utils/_checkPGs.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ceph.mon.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if .Values.deployment.ceph }}\n\n{{- if empty .Values.conf.ceph.global.mon_host -}}\n{{- $monHost := tuple \"ceph_mon\" \"discovery\" \"mon_msgr2\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n{{- $_ := $monHost | set .Values.conf.ceph.global \"mon_host\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceph.global.fsid -}}\n{{- $_ := uuidv4 | set .Values.conf.ceph.global \"fsid\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceph.osd.cluster_network -}}\n{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd \"cluster_network\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceph.osd.public_network -}}\n{{- $_ := .Values.network.public | set .Values.conf.ceph.osd \"public_network\" -}}\n{{- end -}}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ $configMapName }}\ndata:\n  ceph.conf: |\n{{ include \"helm-toolkit.utils.to_ini\" .Values.conf.ceph | indent 4 }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- if .Values.manifests.configmap_etc }}\n{{- list (printf \"%s-%s\" .Release.Name \"etc\") . | include \"ceph.mon.configmap.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/configmap-templates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.configmap_templates .Values.deployment.storage_secrets }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"templates\" | quote }}\ndata:\n  admin.keyring: |\n{{ .Values.conf.templates.keyring.admin | indent 4 }}\n  mon.keyring: |\n{{ .Values.conf.templates.keyring.mon | indent 4 }}\n  bootstrap.keyring.mds: |\n{{ .Values.conf.templates.keyring.bootstrap.mds | indent 4 }}\n  bootstrap.keyring.mgr: |\n{{ .Values.conf.templates.keyring.bootstrap.mgr | indent 4 }}\n  bootstrap.keyring.osd: |\n{{ .Values.conf.templates.keyring.bootstrap.osd | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/daemonset-mon.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"monLivenessProbeTemplate\" -}}\nexec:\n  command:\n    - /tmp/mon-check.sh\n{{- end -}}\n\n{{- define \"monReadinessProbeTemplate\" -}}\nexec:\n  command:\n    - /tmp/mon-check.sh\n{{- end -}}\n\n{{- if and .Values.manifests.daemonset_mon .Values.deployment.ceph }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := (printf \"%s\" .Release.Name) }}\n{{ tuple $envAll \"mon\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods\n      - endpoints\n    verbs:\n      - get\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n{{- end }}\n\n{{- define \"ceph.mon.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 
3 }}\n{{- with $envAll }}\n---\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n  name: ceph-mon\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ceph\" \"mon\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ceph\" \"mon\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"mon\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"mon\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-mon\" \"containerNames\" (list \"ceph-mon\" \"ceph-init-dirs\" \"ceph-log-ownership\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"mon\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.mon.node_selector_key }}: {{ .Values.labels.mon.node_selector_value }}\n      hostNetwork: true\n      shareProcessNamespace: true\n      dnsPolicy: {{ .Values.pod.dns_policy }}\n      initContainers:\n{{ tuple $envAll \"mon\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: ceph-init-dirs\n{{ tuple $envAll \"ceph_mon\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"mon\" 
\"container\" \"ceph_init_dirs\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/init-dirs.sh\n          env:\n            - name: CLUSTER\n              value: \"ceph\"\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-mon-bin\n              mountPath: /tmp/init-dirs.sh\n              subPath: init-dirs.sh\n              readOnly: true\n            - name: pod-var-lib-ceph\n              mountPath: /var/lib/ceph\n              readOnly: false\n            - name: pod-var-lib-ceph-crash\n              mountPath: /var/lib/ceph/crash\n              readOnly: false\n        - name: ceph-log-ownership\n{{ tuple $envAll \"ceph_mon\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"mon\" \"container\" \"ceph_log_ownership\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - ceph:root\n            - /var/log/ceph\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: pod-var-log\n              mountPath: /var/log/ceph\n              readOnly: false\n      containers:\n        - name: ceph-mon\n{{ tuple $envAll \"ceph_mon\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.mon | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"mon\" \"container\" \"ceph_mon\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 
}}\n          env:\n            - name: CLUSTER\n              value: \"ceph\"\n            - name: K8S_HOST_NETWORK\n              value: \"1\"\n            - name: MONMAP\n              value: /var/lib/ceph/mon/monmap\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: CEPH_PUBLIC_NETWORK\n              value: {{ .Values.network.public | quote }}\n            - name: KUBECTL_PARAM\n              value: {{ tuple $envAll \"ceph\" \"mon\" | include \"helm-toolkit.snippets.kubernetes_kubectl_params\" }}\n            - name: MON_PORT\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: MON_PORT_V2\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: MON_IP\n              valueFrom:\n                fieldRef:\n                  fieldPath: status.podIP\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n          command:\n            - /tmp/mon-start.sh\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/mon-stop.sh\n          ports:\n            - containerPort: {{ tuple \"ceph_mon\" \"internal\" \"mon\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            - containerPort: {{ tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" . 
\"component\" \"ceph\" \"container\" \"ceph-mon\" \"type\" \"liveness\" \"probeTemplate\" (include \"monLivenessProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"ceph\" \"container\" \"ceph-mon\" \"type\" \"readiness\" \"probeTemplate\" (include \"monReadinessProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-mon-bin\n              mountPath: /tmp/mon-start.sh\n              subPath: mon-start.sh\n              readOnly: true\n            - name: ceph-mon-bin\n              mountPath: /tmp/mon-stop.sh\n              subPath: mon-stop.sh\n              readOnly: true\n            - name: ceph-mon-bin\n              mountPath: /tmp/mon-check.sh\n              subPath: mon-check.sh\n              readOnly: true\n            - name: ceph-mon-bin\n              mountPath: /tmp/checkObjectReplication.py\n              subPath: utils-checkObjectReplication.py\n              readOnly: true\n            - name: ceph-mon-bin\n              mountPath: /tmp/utils-checkDNS.sh\n              subPath: utils-checkDNS.sh\n              readOnly: true\n            - name: ceph-mon-etc\n              mountPath: /etc/ceph/ceph.conf.template\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-client-admin-keyring\n              mountPath: /etc/ceph/ceph.client.admin.keyring\n              subPath: ceph.client.admin.keyring\n              readOnly: true\n            - name: ceph-mon-keyring\n              mountPath: /etc/ceph/ceph.mon.keyring.seed\n              subPath: ceph.mon.keyring\n              readOnly: true\n            - name: ceph-bootstrap-osd-keyring\n    
          mountPath: /var/lib/ceph/bootstrap-osd/ceph.keyring\n              subPath: ceph.keyring\n              readOnly: true\n            - name: ceph-bootstrap-mds-keyring\n              mountPath: /var/lib/ceph/bootstrap-mds/ceph.keyring\n              subPath: ceph.keyring\n              readOnly: true\n            - name: pod-var-lib-ceph\n              mountPath: /var/lib/ceph\n              readOnly: false\n            - name: pod-var-lib-ceph-crash\n              mountPath: /var/lib/ceph/crash\n              readOnly: false\n            - name: pod-var-log\n              mountPath: /var/log/ceph\n              readOnly: false\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-run\n          emptyDir:\n            medium: \"Memory\"\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: pod-var-log\n          emptyDir: {}\n        - name: ceph-mon-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\n            defaultMode: 0555\n        - name: ceph-mon-etc\n          configMap:\n            name: {{ $configMapName }}\n            defaultMode: 0444\n        - name: pod-var-lib-ceph\n          hostPath:\n            path: {{ .Values.conf.storage.mon.directory }}\n        - name: pod-var-lib-ceph-crash\n          hostPath:\n            path: /var/lib/openstack-helm/ceph/crash\n            type: DirectoryOrCreate\n        - name: ceph-client-admin-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin }}\n        - name: ceph-mon-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.mon }}\n        - name: ceph-bootstrap-osd-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.osd }}\n        - name: ceph-bootstrap-mds-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.mds }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_mon 
}}\n{{- $daemonset := .Values.daemonset.prefix_name }}\n{{- $configMapName := (printf \"%s-%s\" .Release.Name \"etc\") }}\n{{- $serviceAccountName := (printf \"%s\" .Release.Name) }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"ceph.mon.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"ceph.mon.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"ceph.utils.mon_daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/deployment-mgr.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"mgrLivenessProbeTemplate\" -}}\nexec:\n  command:\n    - /tmp/mgr-check.sh\n{{- end }}\n\n{{- define \"mgrReadinessProbeTemplate\" -}}\nexec:\n  command:\n    - /tmp/mgr-check.sh\n{{- end }}\n\n{{- if and .Values.manifests.deployment_mgr (and .Values.deployment.ceph .Values.conf.features.mgr ) }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ceph-mgr\" }}\n# This protective IF prevents an attempt of repeated creation\n# of ceph-mgr service account.\n# To be considered: the separation of SA and Deployment manifests.\n{{- if .Values.manifests.deployment_mgr_sa }}\n{{ tuple $envAll \"mgr\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- end }}\n---\nkind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: ceph-mgr\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ceph\" \"mgr\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.mgr }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ceph\" \"mgr\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  strategy:\n    type: {{ .Values.pod.updateStrategy.mgr.type }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"mgr\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      
annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-mgr\" \"containerNames\" (list \"ceph-mgr\" \"ceph-init-dirs\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"mgr\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"ceph\" \"mgr\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ tuple $envAll \"mgr\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n      nodeSelector:\n        {{ .Values.labels.mgr.node_selector_key }}: {{ .Values.labels.mgr.node_selector_value }}\n      hostNetwork: true\n      hostPID: true\n      dnsPolicy: {{ .Values.pod.dns_policy }}\n      initContainers:\n{{ tuple $envAll \"mgr\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: ceph-init-dirs\n{{ tuple $envAll \"ceph_mgr\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"mgr\" \"container\" \"init_dirs\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/init-dirs.sh\n          env:\n            - name: CLUSTER\n              value: \"ceph\"\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-mon-bin\n              mountPath: 
/tmp/init-dirs.sh\n              subPath: init-dirs.sh\n              readOnly: true\n            - name: pod-var-lib-ceph\n              mountPath: /var/lib/ceph\n              readOnly: false\n            - name: pod-var-lib-ceph-crash\n              mountPath: /var/lib/ceph/crash\n              readOnly: false\n      containers:\n        - name: ceph-mgr\n{{ tuple $envAll \"ceph_mgr\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.mgr | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"mgr\" \"container\" \"mgr\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: CLUSTER\n              value: \"ceph\"\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MON_PORT\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: MON_PORT_V2\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            {{- if .Values.ceph_mgr_enabled_modules }}\n            - name: ENABLED_MODULES\n              value: |-\n              {{- range $value := .Values.ceph_mgr_enabled_modules }}\n                {{ $value }}\n              {{- end }}\n            {{- end }}\n            {{- if .Values.ceph_mgr_modules_config }}\n            {{- range $module,$params := .Values.ceph_mgr_modules_config }}\n            {{- range $key, $value := $params }}\n            - name: {{ $module }}_{{ $key }}\n              value: {{ $value | quote }}\n            {{- end }}\n            {{- end }}\n            {{- end }}\n          command:\n            - /mgr-start.sh\n        
  ports:\n            - name: mgr\n              containerPort: {{ tuple \"ceph_mgr\" \"internal\" \"mgr\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          {{- if (has \"prometheus\" .Values.ceph_mgr_enabled_modules) }}\n            - name: metrics\n              containerPort: {{ tuple \"ceph_mgr\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          {{ end -}}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-mon-bin\n              mountPath: /mgr-start.sh\n              subPath: mgr-start.sh\n              readOnly: true\n            - name: ceph-mon-bin\n              mountPath: /tmp/mgr-check.sh\n              subPath: mgr-check.sh\n              readOnly: true\n            - name: ceph-mon-etc\n              mountPath: /etc/ceph/ceph.conf.template\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-mon-admin-keyring\n              mountPath: /etc/ceph/ceph.client.admin.keyring\n              subPath: ceph.client.admin.keyring\n              readOnly: true\n            - name: ceph-bootstrap-mgr-keyring\n              mountPath: /var/lib/ceph/bootstrap-mgr/ceph.keyring\n              subPath: ceph.keyring\n              readOnly: false\n            - name: pod-var-lib-ceph\n              mountPath: /var/lib/ceph\n              readOnly: false\n            - name: pod-var-lib-ceph-crash\n              mountPath: /var/lib/ceph/crash\n              readOnly: false\n            - name: ceph-mon-bin\n              mountPath: /tmp/utils-checkPGs.py\n              subPath: utils-checkPGs.py\n              readOnly: true\n{{ dict \"envAll\" . \"component\" \"ceph\" \"container\" \"ceph-mgr\" \"type\" \"liveness\" \"probeTemplate\" (include \"mgrLivenessProbeTemplate\" . 
| fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"ceph\" \"container\" \"ceph-mgr\" \"type\" \"readiness\" \"probeTemplate\" (include \"mgrReadinessProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-run\n          emptyDir:\n            medium: \"Memory\"\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-mon-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\n            defaultMode: 0555\n        - name: ceph-mon-etc\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"etc\" | quote }}\n            defaultMode: 0444\n        - name: pod-var-lib-ceph\n          emptyDir: {}\n        - name: pod-var-lib-ceph-crash\n          hostPath:\n            path: /var/lib/openstack-helm/ceph/crash\n            type: DirectoryOrCreate\n        - name: ceph-mon-admin-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin }}\n        - name: ceph-bootstrap-mgr-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.mgr }}\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/deployment-moncheck.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.deployment_moncheck .Values.deployment.ceph }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ceph-mon-check\" }}\n{{ tuple $envAll \"moncheck\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\nkind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: ceph-mon-check\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ceph\" \"moncheck\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.mon_check }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ceph\" \"moncheck\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"moncheck\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-mon-check\" \"containerNames\" (list \"ceph-mon\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"moncheck\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ 
$serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"ceph\" \"moncheck\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ tuple $envAll \"mon_check\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n      nodeSelector:\n        {{ .Values.labels.mon.node_selector_key }}: {{ .Values.labels.mon.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"moncheck\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: ceph-mon\n{{ tuple $envAll \"ceph_mon_check\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.moncheck | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"moncheck\" \"container\" \"ceph_mon\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: CLUSTER\n              value: \"ceph\"\n            - name: K8S_HOST_NETWORK\n              value: \"1\"\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MON_PORT\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: MON_PORT_V2\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n          command:\n            - /tmp/moncheck-start.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-mon-bin\n              mountPath: /tmp/moncheck-start.sh\n            
  subPath: moncheck-start.sh\n              readOnly: true\n            - name: ceph-mon-bin\n              mountPath: /tmp/moncheck-reap-zombies.py\n              subPath: moncheck-reap-zombies.py\n              readOnly: true\n            - name: ceph-mon-bin\n              mountPath: /tmp/utils-checkDNS.sh\n              subPath: utils-checkDNS.sh\n              readOnly: true\n            - name: ceph-mon-etc\n              mountPath: /etc/ceph/ceph.conf.template\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-client-admin-keyring\n              mountPath: /etc/ceph/ceph.client.admin.keyring\n              subPath: ceph.client.admin.keyring\n              readOnly: true\n            - name: ceph-mon-keyring\n              mountPath: /etc/ceph/ceph.mon.keyring\n              subPath: ceph.mon.keyring\n              readOnly: true\n            - name: pod-var-lib-ceph\n              mountPath: /var/lib/ceph\n              readOnly: false\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-run\n          emptyDir:\n            medium: \"Memory\"\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-mon-etc\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"etc\" | quote }}\n            defaultMode: 0444\n        - name: ceph-mon-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\n            defaultMode: 0555\n        - name: pod-var-lib-ceph\n          emptyDir: {}\n        - name: ceph-client-admin-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin }}\n        - name: ceph-mon-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.mon }}\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "ceph-mon/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ceph-bootstrap\" }}\n{{ tuple $envAll \"bootstrap\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: ceph-bootstrap\n  labels:\n{{ tuple $envAll \"ceph\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-bootstrap\" \"containerNames\" (list \"ceph-bootstrap\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple 
$envAll \"bootstrap\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: ceph-bootstrap\n{{ tuple $envAll \"ceph_bootstrap\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" \"container\" \"ceph_bootstrap\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/bootstrap.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-mon-bin\n              mountPath: /tmp/bootstrap.sh\n              subPath: bootstrap.sh\n              readOnly: true\n            - name: ceph-mon-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-client-admin-keyring\n              mountPath: /etc/ceph/ceph.client.admin.keyring\n              subPath: ceph.client.admin.keyring\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-mon-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\n            defaultMode: 0555\n        - name: ceph-mon-etc\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"etc\" | quote }}\n            defaultMode: 0444\n        - name: ceph-client-admin-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin }}\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"ceph-mon\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/job-keyring.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_keyring .Values.deployment.storage_secrets }}\n{{- $envAll := . }}\n{{- range $key1, $cephBootstrapKey := tuple \"mds\" \"osd\" \"mon\" \"mgr\" }}\n{{- $component := print $cephBootstrapKey \"-keyring-generator\" }}\n{{- $jobName := print \"ceph-\" $component }}\n\n{{- $serviceAccountName := $jobName }}\n{{ tuple $envAll \"job_keyring_generator\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - create\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ $jobName }}\n  labels:\n{{ tuple $envAll \"ceph\" $jobName | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" $jobName | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" $jobName \"containerNames\" (list $jobName \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"ceph\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"job_keyring_generator\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: {{ $jobName }}\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.secret_provisioning | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"ceph\" \"container\" $jobName | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: CEPH_GEN_DIR\n              value: /tmp\n            - name: CEPH_TEMPLATES_DIR\n              value: /tmp/templates\n            {{- if eq $cephBootstrapKey \"mon\" }}\n            - name: CEPH_KEYRING_NAME\n              value: ceph.mon.keyring\n            - name: CEPH_KEYRING_TEMPLATE\n              value: mon.keyring\n            {{- else }}\n            - name: CEPH_KEYRING_NAME\n              value: ceph.keyring\n            - name: CEPH_KEYRING_TEMPLATE\n              value: bootstrap.keyring.{{ $cephBootstrapKey }}\n            {{- end }}\n      
      - name: KUBE_SECRET_NAME\n              value: {{  index $envAll.Values.secrets.keyrings $cephBootstrapKey }}\n          command:\n            - /tmp/keys-bootstrap-keyring-manager.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-mon-bin\n              mountPath: /tmp/keys-bootstrap-keyring-manager.sh\n              subPath: keys-bootstrap-keyring-manager.sh\n              readOnly: true\n            - name: ceph-mon-bin\n              mountPath: /tmp/keys-bootstrap-keyring-generator.py\n              subPath: keys-bootstrap-keyring-generator.py\n              readOnly: true\n            - name: ceph-templates\n              mountPath: /tmp/templates\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-mon-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\n            defaultMode: 0555\n        - name: ceph-templates\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"templates\" | quote }}\n            defaultMode: 0444\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/job-post-apply.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if eq .Values.pod.lifecycle.upgrades.daemonsets.pod_replacement_strategy \"OnDelete\" }}\n{{- if and .Values.manifests.job_post_apply }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"post-apply\" }}\n{{ tuple $envAll \"post-apply\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - ''\n    resources:\n      - pods\n      - events\n      - jobs\n      - pods/exec\n    verbs:\n      - create\n      - get\n      - delete\n      - list\n  - apiGroups:\n      - 'apps'\n    resources:\n      - daemonsets\n    verbs:\n      - get\n      - list\n  - apiGroups:\n      - 'batch'\n    resources:\n      - jobs\n    verbs:\n      - get\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ $serviceAccountName }}\n  labels:\n{{ tuple $envAll \"ceph-upgrade\" \"post-apply\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | 
indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph-upgrade\" \"post-apply\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-mon-post-apply\" \"containerNames\" (list \"ceph-mon-post-apply\" \"init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"post_apply\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"post-apply\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: ceph-mon-post-apply\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"post_apply\" \"container\" \"ceph_mon_post_apply\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: CLUSTER\n              value: \"ceph\"\n            - name: CEPH_NAMESPACE\n              value: {{ .Release.Namespace }}\n            - name: RELEASE_GROUP_NAME\n              value: {{ .Release.Name }}\n            - name: UNCONDITIONAL_MON_RESTART\n              value: {{ .Values.conf.storage.unconditional_mon_restart | quote }}\n          command:\n     
       - /tmp/post-apply.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-mon-bin\n              mountPath: /tmp/post-apply.sh\n              subPath: post-apply.sh\n              readOnly: true\n            - name: ceph-mon-bin\n              mountPath: /tmp/wait-for-pods.sh\n              subPath: wait-for-pods.sh\n              readOnly: true\n            - name: ceph-mon-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-mon-admin-keyring\n              mountPath: /etc/ceph/ceph.client.admin.keyring\n              subPath: ceph.client.admin.keyring\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-mon-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\n            defaultMode: 0555\n        - name: ceph-mon-etc\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"etc\" | quote }}\n            defaultMode: 0444\n        - name: ceph-mon-admin-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/job-storage-admin-keys.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_storage_admin_keys .Values.deployment.storage_secrets }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ceph-storage-keys-generator\" }}\n{{ tuple $envAll \"storage_keys_generator\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods\n      - pods/exec\n      - secrets\n    verbs:\n      - get\n      - create\n      - patch\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: ceph-storage-keys-generator\n  labels:\n{{ tuple $envAll \"ceph\" \"storage-keys-generator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"storage-keys-generator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      
annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-storage-keys-generator\" \"containerNames\" (list \"ceph-storage-keys-generator\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"storage_keys_generator\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"storage_keys_generator\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: ceph-storage-keys-generator\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.secret_provisioning | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"storage_keys_generator\" \"container\" \"ceph_storage_keys_generator\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: CEPH_GEN_DIR\n              value: /tmp\n            - name: CEPH_TEMPLATES_DIR\n              value: /tmp/templates\n            - name: CEPH_KEYRING_NAME\n              value: ceph.client.admin.keyring\n            - name: 
CEPH_KEYRING_TEMPLATE\n              value: admin.keyring\n            - name: CEPH_KEYRING_ADMIN_NAME\n              value: {{ .Values.secrets.keyrings.admin }}\n            - name: CEPH_STORAGECLASS_ADMIN_SECRET_NAME\n              value: {{ .Values.storageclass.rbd.parameters.adminSecretName }}\n            - name: CEPH_STORAGECLASS_ADMIN_SECRET_NAME_NODE\n              value: {{ .Values.storageclass.rbd.parameters.adminSecretNameNode }}\n          command:\n            - /tmp/keys-storage-keyring-manager.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-mon-bin\n              mountPath: /tmp/keys-storage-keyring-manager.sh\n              subPath: keys-storage-keyring-manager.sh\n              readOnly: true\n            - name: ceph-mon-bin\n              mountPath: /tmp/keys-bootstrap-keyring-generator.py\n              subPath: keys-bootstrap-keyring-generator.py\n              readOnly: true\n            - name: ceph-templates\n              mountPath: /tmp/templates\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-mon-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\n            defaultMode: 0555\n        - name: ceph-templates\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"templates\" | quote }}\n            defaultMode: 0444\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/service-mgr.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_mgr ( and .Values.deployment.ceph .Values.conf.features.mgr ) }}\n{{- $envAll := . }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.ceph_mgr }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: ceph-mgr\n  labels:\n{{ tuple $envAll \"ceph\" \"manager\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{- if .Values.monitoring.prometheus.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n{{- end }}\nspec:\n  ports:\n  - name: ceph-mgr\n    port: {{ tuple \"ceph_mgr\" \"internal\" \"mgr\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    protocol: TCP\n    targetPort: {{ tuple \"ceph_mgr\" \"internal\" \"mgr\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  {{ if (has \"prometheus\" .Values.ceph_mgr_enabled_modules) }}\n  - name: metrics\n    protocol: TCP\n    port: {{ tuple \"ceph_mgr\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  {{ end }}\n  selector:\n{{ tuple $envAll \"ceph\" \"mgr\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/service-mon-discovery.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_mon_discovery .Values.deployment.ceph }}\n{{- $envAll := . }}\n---\nkind: Service\napiVersion: v1\nmetadata:\n  name: {{ tuple \"ceph_mon\" \"discovery\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: mon\n    port: {{ tuple \"ceph_mon\" \"discovery\" \"mon\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    protocol: TCP\n    targetPort: {{ tuple \"ceph_mon\" \"discovery\" \"mon\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  - name: mon-msgr2\n    port: {{ tuple \"ceph_mon\" \"discovery\" \"mon_msgr2\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    protocol: TCP\n    targetPort: {{ tuple \"ceph_mon\" \"discovery\" \"mon_msgr2\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{- if .Values.manifests.daemonset_mon }}\n{{ tuple $envAll \"ceph\" \"mon\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- else }}\n    app: rook-ceph-mon\n    ceph_daemon_type: mon\n{{- end }}\n  clusterIP: None\n  publishNotReadyAddresses: true\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/service-mon.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_mon .Values.deployment.ceph }}\n{{- $envAll := . }}\n---\nkind: Service\napiVersion: v1\nmetadata:\n  name: {{ tuple \"ceph_mon\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: mon\n    port: {{ tuple \"ceph_mon\" \"internal\" \"mon\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    protocol: TCP\n    targetPort: {{ tuple \"ceph_mon\" \"internal\" \"mon\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  - name: mon-msgr2\n    port: {{ tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    protocol: TCP\n    targetPort: {{ tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"ceph\" \"mon\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  clusterIP: None\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/templates/snippets/_mon_host_from_k8s_ep.sh.tpl",
    "content": "{{- define \"ceph-mon.snippets.mon_host_from_k8s_ep\" -}}\n{{/*\n\nInserts a bash function definition mon_host_from_k8s_ep() which can be used\nto construct a mon_hosts value from the given namespaced endpoint.\n\nUsage (e.g. in _script.sh.tpl):\n    #!/bin/bash\n\n    : \"${NS:=ceph}\"\n    : \"${EP:=ceph-mon-discovery}\"\n\n    {{ include \"ceph-mon.snippets.mon_host_from_k8s_ep\" . }}\n\n    MON_HOST=$(mon_host_from_k8s_ep \"$NS\" \"$EP\")\n\n    if [ -z \"$MON_HOST\" ]; then\n        # deal with failure\n    else\n        sed -i -e \"s/^mon_host = /mon_host = $MON_HOST/\" /etc/ceph/ceph.conf\n    fi\n*/}}\n{{`\n# Construct a mon_hosts value from the given namespaced endpoint\n# IP x.x.x.x with port p named \"mon-msgr2\" will appear as [v2:x.x.x.x/p/0]\n# IP x.x.x.x with port q named \"mon\" will appear as [v1:x.x.x.x/q/0]\n# IP x.x.x.x with ports p and q will appear as [v2:x.x.x.x/p/0,v1:x.x.x.x/q/0]\n# The entries for all IPs will be joined with commas\nmon_host_from_k8s_ep() {\n  local ns=$1\n  local ep=$2\n\n  if [ -z \"$ns\" ] || [ -z \"$ep\" ]; then\n    return 1\n  fi\n\n  # We don't want shell expansion for the go-template expression\n  # shellcheck disable=SC2016\n  kubectl get endpoints -n \"$ns\" \"$ep\" -o go-template='\n    {{- $sep := \"\" }}\n    {{- range $_,$s := .subsets }}\n      {{- $v2port := 0 }}\n      {{- $v1port := 0 }}\n      {{- range $_,$port := index $s \"ports\" }}\n        {{- if (eq $port.name \"mon-msgr2\") }}\n          {{- $v2port = $port.port }}\n        {{- else if (eq $port.name \"mon\") }}\n          {{- $v1port = $port.port }}\n        {{- end }}\n      {{- end }}\n      {{- range $_,$address := index $s \"addresses\" }}\n        {{- $v2endpoint := printf \"v2:%s:%d/0\" $address.ip $v2port }}\n        {{- $v1endpoint := printf \"v1:%s:%d/0\" $address.ip $v1port }}\n        {{- if (and $v2port $v1port) }}\n          {{- printf \"%s[%s,%s]\" $sep $v2endpoint $v1endpoint }}\n          {{- $sep = \",\" }}\n     
   {{- else if $v2port }}\n          {{- printf \"%s[%s]\" $sep $v2endpoint }}\n          {{- $sep = \",\" }}\n        {{- else if $v1port }}\n          {{- printf \"%s[%s]\" $sep $v1endpoint }}\n          {{- $sep = \",\" }}\n        {{- end }}\n      {{- end }}\n    {{- end }}'\n}\n`}}\n{{- end -}}\n"
  },
  {
    "path": "ceph-mon/templates/utils/_mon_daemonset_overrides.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ceph.utils.match_exprs_hash\" }}\n  {{- $match_exprs := index . 0 }}\n  {{- $context := index . 1 }}\n  {{- $_ := set $context.Values \"__match_exprs_hash_content\" \"\" }}\n  {{- range $match_expr := $match_exprs }}\n    {{- $_ := set $context.Values \"__match_exprs_hash_content\" (print $context.Values.__match_exprs_hash_content $match_expr.key $match_expr.operator ($match_expr.values | quote)) }}\n  {{- end }}\n  {{- $context.Values.__match_exprs_hash_content | sha256sum | trunc 8 }}\n  {{- $_ := unset $context.Values \"__match_exprs_hash_content\" }}\n{{- end }}\n\n{{- define \"ceph.utils.mon_daemonset_overrides\" }}\n  {{- $daemonset := index . 0 }}\n  {{- $daemonset_yaml := index . 1 }}\n  {{- $configmap_include := index . 2 }}\n  {{- $configmap_name := index . 3 }}\n  {{- $context := index . 4 }}\n  {{- $_ := unset $context \".Files\" }}\n  {{- $_ := set $context.Values \"__daemonset_yaml\" $daemonset_yaml }}\n  {{- $daemonset_root_name := printf \"ceph_%s\" $daemonset }}\n  {{- $_ := set $context.Values \"__daemonset_list\" list }}\n  {{- $_ := set $context.Values \"__default\" dict }}\n  {{- if hasKey $context.Values.conf \"overrides\" }}\n    {{- range $key, $val := $context.Values.conf.overrides }}\n\n      {{- if eq $key $daemonset_root_name }}\n        {{- range $type, $type_data := . }}\n\n          {{- if eq $type \"hosts\" }}\n            {{- range $host_data := . 
}}\n              {{/* dictionary that will contain all info needed to generate this\n              iteration of the daemonset */}}\n              {{- $current_dict := dict }}\n\n              {{/* set daemonset name */}}\n              {{- $_ := set $current_dict \"name\" $host_data.name }}\n\n              {{/* apply overrides */}}\n              {{- $override_conf_copy := $host_data.conf }}\n              {{/* Deep copy to prevent https://storyboard.openstack.org/#!/story/2005936 */}}\n              {{- $root_conf_copy := omit ($context.Values.conf | toYaml | fromYaml) \"overrides\" }}\n              {{- $merged_dict := mergeOverwrite $root_conf_copy $override_conf_copy }}\n              {{- $root_conf_copy2 := dict \"conf\" $merged_dict }}\n              {{- $context_values := omit (omit ($context.Values | toYaml | fromYaml) \"conf\") \"__daemonset_list\" }}\n              {{- $root_conf_copy3 := mergeOverwrite $context_values $root_conf_copy2 }}\n              {{- $root_conf_copy4 := dict \"Values\" $root_conf_copy3 }}\n              {{- $_ := set $current_dict \"nodeData\" $root_conf_copy4 }}\n\n              {{/* Schedule to this host explicitly. 
*/}}\n              {{- $nodeSelector_dict := dict }}\n\n              {{- $_ := set $nodeSelector_dict \"key\" \"kubernetes.io/hostname\" }}\n              {{- $_ := set $nodeSelector_dict \"operator\" \"In\" }}\n\n              {{- $values_list := list $host_data.name }}\n              {{- $_ := set $nodeSelector_dict \"values\" $values_list }}\n\n              {{- $list_aggregate := list $nodeSelector_dict }}\n              {{- $_ := set $current_dict \"matchExpressions\" $list_aggregate }}\n\n              {{/* store completed daemonset entry/info into global list */}}\n              {{- $list_aggregate := append $context.Values.__daemonset_list $current_dict }}\n              {{- $_ := set $context.Values \"__daemonset_list\" $list_aggregate }}\n\n            {{- end }}\n          {{- end }}\n\n          {{- if eq $type \"labels\" }}\n            {{- $_ := set $context.Values \"__label_list\" . }}\n            {{- range $label_data := . }}\n              {{/* dictionary that will contain all info needed to generate this\n              iteration of the daemonset. 
*/}}\n              {{- $_ := set $context.Values \"__current_label\" dict }}\n\n              {{/* set daemonset name */}}\n              {{- $_ := set $context.Values.__current_label \"name\" $label_data.label.key }}\n\n              {{/* apply overrides */}}\n              {{- $override_conf_copy := $label_data.conf }}\n              {{/* Deep copy to prevent https://storyboard.openstack.org/#!/story/2005936 */}}\n              {{- $root_conf_copy := omit ($context.Values.conf | toYaml | fromYaml) \"overrides\" }}\n              {{- $merged_dict := mergeOverwrite $root_conf_copy $override_conf_copy }}\n              {{- $root_conf_copy2 := dict \"conf\" $merged_dict }}\n              {{- $context_values := omit (omit ($context.Values | toYaml | fromYaml) \"conf\") \"__daemonset_list\" }}\n              {{- $root_conf_copy3 := mergeOverwrite $context_values $root_conf_copy2 }}\n              {{- $root_conf_copy4 := dict \"Values\" $root_conf_copy3 }}\n              {{- $_ := set $context.Values.__current_label \"nodeData\" $root_conf_copy4 }}\n\n              {{/* Schedule to the provided label value(s) */}}\n              {{- $label_dict := omit $label_data.label \"NULL\" }}\n              {{- $_ := set $label_dict \"operator\" \"In\" }}\n              {{- $list_aggregate := list $label_dict }}\n              {{- $_ := set $context.Values.__current_label \"matchExpressions\" $list_aggregate }}\n\n              {{/* Do not schedule to other specified labels, with higher\n              precedence as the list position increases. Last defined label\n              is highest priority. 
*/}}\n              {{- $other_labels := without $context.Values.__label_list $label_data }}\n              {{- range $label_data2 := $other_labels }}\n                {{- $label_dict := omit $label_data2.label \"NULL\" }}\n\n                {{- $_ := set $label_dict \"operator\" \"NotIn\" }}\n\n                {{- $list_aggregate := append $context.Values.__current_label.matchExpressions $label_dict }}\n                {{- $_ := set $context.Values.__current_label \"matchExpressions\" $list_aggregate }}\n              {{- end }}\n              {{- $_ := set $context.Values \"__label_list\" $other_labels }}\n\n              {{/* Do not schedule to any other specified hosts */}}\n              {{- range $type, $type_data := $val }}\n                {{- if eq $type \"hosts\" }}\n                  {{- range $host_data := . }}\n                    {{- $label_dict := dict }}\n\n                    {{- $_ := set $label_dict \"key\" \"kubernetes.io/hostname\" }}\n                    {{- $_ := set $label_dict \"operator\" \"NotIn\" }}\n\n                    {{- $values_list := list $host_data.name }}\n                    {{- $_ := set $label_dict \"values\" $values_list }}\n\n                    {{- $list_aggregate := append $context.Values.__current_label.matchExpressions $label_dict }}\n                    {{- $_ := set $context.Values.__current_label \"matchExpressions\" $list_aggregate }}\n                  {{- end }}\n                {{- end }}\n              {{- end }}\n\n              {{/* store completed daemonset entry/info into global list */}}\n              {{- $list_aggregate := append $context.Values.__daemonset_list $context.Values.__current_label }}\n              {{- $_ := set $context.Values \"__daemonset_list\" $list_aggregate }}\n              {{- $_ := unset $context.Values \"__current_label\" }}\n\n            {{- end }}\n          {{- end }}\n        {{- end }}\n\n        {{/* scheduler exceptions for the default daemonset */}}\n        {{- $_ := set 
$context.Values.__default \"matchExpressions\" list }}\n\n        {{- range $type, $type_data := . }}\n          {{/* Do not schedule to other specified labels */}}\n          {{- if eq $type \"labels\" }}\n            {{- range $label_data := . }}\n              {{- $default_dict := omit $label_data.label \"NULL\" }}\n\n              {{- $_ := set $default_dict \"operator\" \"NotIn\" }}\n\n              {{- $list_aggregate := append $context.Values.__default.matchExpressions $default_dict }}\n              {{- $_ := set $context.Values.__default \"matchExpressions\" $list_aggregate }}\n            {{- end }}\n          {{- end }}\n          {{/* Do not schedule to other specified hosts */}}\n          {{- if eq $type \"hosts\" }}\n            {{- range $host_data := . }}\n              {{- $default_dict := dict }}\n\n              {{- $_ := set $default_dict \"key\" \"kubernetes.io/hostname\" }}\n              {{- $_ := set $default_dict \"operator\" \"NotIn\" }}\n\n              {{- $values_list := list $host_data.name }}\n              {{- $_ := set $default_dict \"values\" $values_list }}\n\n              {{- $list_aggregate := append $context.Values.__default.matchExpressions $default_dict }}\n              {{- $_ := set $context.Values.__default \"matchExpressions\" $list_aggregate }}\n            {{- end }}\n          {{- end }}\n        {{- end }}\n      {{- end }}\n    {{- end }}\n  {{- end }}\n\n  {{/* generate the default daemonset */}}\n\n  {{/* set name */}}\n  {{- $_ := set $context.Values.__default \"name\" \"default\" }}\n\n  {{/* no overrides apply, so copy as-is */}}\n  {{- $root_conf_copy1 := omit $context.Values.conf \"overrides\" }}\n  {{- $root_conf_copy2 := dict \"conf\" $root_conf_copy1 }}\n  {{- $context_values := omit $context.Values \"conf\" }}\n  {{- $root_conf_copy3 := mergeOverwrite $context_values $root_conf_copy2 }}\n  {{- $root_conf_copy4 := dict \"Values\" $root_conf_copy3 }}\n  {{- $_ := set $context.Values.__default \"nodeData\" 
$root_conf_copy4 }}\n\n  {{/* add to global list */}}\n  {{- $list_aggregate := append $context.Values.__daemonset_list $context.Values.__default }}\n  {{- $_ := set $context.Values \"__daemonset_list\" $list_aggregate }}\n\n  {{- $_ := set $context.Values \"__last_configmap_name\" $configmap_name }}\n  {{- range $current_dict := $context.Values.__daemonset_list }}\n\n    {{- $context_novalues := omit $context \"Values\" }}\n    {{- $merged_dict := mergeOverwrite $context_novalues $current_dict.nodeData }}\n    {{- $_ := set $current_dict \"nodeData\" $merged_dict }}\n\n    {{/* name needs to be a DNS-1123 compliant name. Ensure lower case */}}\n    {{- $name_format1 := printf (print $daemonset_root_name \"-\" $current_dict.name) | lower }}\n    {{/* labels may contain underscores which would be invalid here, so we replace them with dashes.\n    there may be other valid label names which would make for an invalid DNS-1123 name,\n    but these will be easier to handle in future with sprig regex* functions\n    (not available in helm 2.5.1) */}}\n    {{- $name_format2 := $name_format1 | replace \"_\" \"-\" | replace \".\" \"-\" }}\n    {{/* To account for the case where the same label is defined multiple times in overrides\n    (but with different label values), we add a sha of the scheduling data to ensure\n    name uniqueness */}}\n    {{- $_ := set $current_dict \"dns_1123_name\" dict }}\n    {{- if hasKey $current_dict \"matchExpressions\" }}\n      {{- $_ := set $current_dict \"dns_1123_name\" (printf (print $name_format2 \"-\" (list $current_dict.matchExpressions $context | include \"ceph.utils.match_exprs_hash\"))) }}\n    {{- else }}\n      {{- $_ := set $current_dict \"dns_1123_name\" $name_format2 }}\n    {{- end }}\n\n    {{/* set daemonset metadata name */}}\n    {{- if not $context.Values.__daemonset_yaml.metadata }}{{- $_ := set $context.Values.__daemonset_yaml \"metadata\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.metadata.name 
}}{{- $_ := set $context.Values.__daemonset_yaml.metadata \"name\" dict }}{{- end }}\n    {{- $_ := set $context.Values.__daemonset_yaml.metadata \"name\" $current_dict.dns_1123_name }}\n\n    {{/* cross-reference configmap name to container volume definitions */}}\n    {{- $_ := set $context.Values \"__volume_list\" list }}\n    {{- range $current_volume := $context.Values.__daemonset_yaml.spec.template.spec.volumes }}\n      {{- $_ := set $context.Values \"__volume\" $current_volume }}\n      {{- if hasKey $context.Values.__volume \"configMap\" }}\n        {{- if eq $context.Values.__volume.configMap.name $context.Values.__last_configmap_name }}\n          {{- $_ := set $context.Values.__volume.configMap \"name\" $current_dict.dns_1123_name }}\n        {{- end }}\n      {{- end }}\n      {{- $updated_list := append $context.Values.__volume_list $context.Values.__volume }}\n      {{- $_ := set $context.Values \"__volume_list\" $updated_list }}\n    {{- end }}\n    {{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec \"volumes\" $context.Values.__volume_list }}\n\n    {{/* populate scheduling restrictions */}}\n    {{- if hasKey $current_dict \"matchExpressions\" }}\n      {{- if not $context.Values.__daemonset_yaml.spec.template.spec }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template \"spec\" dict }}{{- end }}\n      {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec \"affinity\" dict }}{{- end }}\n      {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity \"nodeAffinity\" dict }}{{- end }}\n      {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity 
\"requiredDuringSchedulingIgnoredDuringExecution\" dict }}{{- end }}\n      {{- $match_exprs := dict }}\n      {{- $_ := set $match_exprs \"matchExpressions\" $current_dict.matchExpressions }}\n      {{- $appended_match_expr := list $match_exprs }}\n      {{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution \"nodeSelectorTerms\" $appended_match_expr }}\n    {{- end }}\n\n    {{/* input value hash for current set of values overrides */}}\n    {{- if not $context.Values.__daemonset_yaml.spec }}{{- $_ := set $context.Values.__daemonset_yaml \"spec\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.spec.template }}{{- $_ := set $context.Values.__daemonset_yaml.spec \"template\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.spec.template.metadata }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template \"metadata\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.spec.template.metadata.annotations }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata \"annotations\" dict }}{{- end }}\n    {{- $cmap := list $current_dict.dns_1123_name $current_dict.nodeData | include $configmap_include }}\n    {{- $values_hash := $cmap | quote | sha256sum }}\n    {{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata.annotations \"configmap-etc-hash\" $values_hash }}\n\n    {{/* generate configmap */}}\n---\n{{ $cmap }}\n\n    {{/* generate daemonset yaml */}}\n{{ range $k, $v := index $current_dict.nodeData.Values.conf.storage \"mon\" }}\n---\n{{- $_ := set $context.Values \"__tmpYAML\" dict }}\n\n{{ $dsNodeName := index $context.Values.__daemonset_yaml.metadata \"name\" }}\n{{ $localDsNodeName := print (trunc 54 $current_dict.dns_1123_name) \"-\" (print $dsNodeName $k | quote | sha256sum | trunc 8) }}\n{{- if not $context.Values.__tmpYAML.metadata }}{{- $_ := set $context.Values.__tmpYAML \"metadata\" dict 
}}{{- end }}\n{{- $_ := set $context.Values.__tmpYAML.metadata \"name\" $localDsNodeName }}\n\n{{ merge $context.Values.__tmpYAML $context.Values.__daemonset_yaml | toYaml }}\n\n{{ end }}\n\n---\n    {{- $_ := set $context.Values \"__last_configmap_name\" $current_dict.dns_1123_name }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "ceph-mon/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for ceph-mon.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\ndeployment:\n  ceph: true\n  storage_secrets: true\n\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    ceph_bootstrap: 'quay.io/airshipit/ceph-daemon:ubuntu_jammy_20.2.1-1-20260407'\n    ceph_config_helper: 'quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407'\n    ceph_mon: 'quay.io/airshipit/ceph-daemon:ubuntu_jammy_20.2.1-1-20260407'\n    ceph_mgr: 'quay.io/airshipit/ceph-daemon:ubuntu_jammy_20.2.1-1-20260407'\n    ceph_mon_check: 'quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407'\n    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy'\n    image_repo_sync: 'quay.io/airshipit/docker:27.5.0'\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  mon:\n    node_selector_key: ceph-mon\n    node_selector_value: enabled\n  mgr:\n    node_selector_key: ceph-mgr\n    node_selector_value: enabled\n\npod:\n  security_context:\n    mon:\n      pod:\n        runAsUser: 65534\n      container:\n        ceph_init_dirs:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        ceph_log_ownership:\n          runAsUser: 0\n          
readOnlyRootFilesystem: true\n        ceph_mon:\n          runAsUser: 64045\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    mgr:\n      pod:\n        runAsUser: 65534\n      container:\n        init_dirs:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        mgr:\n          runAsUser: 64045\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    moncheck:\n      pod:\n        runAsUser: 65534\n      container:\n        ceph_mon:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    bootstrap:\n      pod:\n        runAsUser: 65534\n      container:\n        ceph_bootstrap:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    storage_keys_generator:\n      pod:\n        runAsUser: 65534\n      container:\n        ceph_storage_keys_generator:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    ceph:\n      pod:\n        runAsUser: 65534\n      container:\n        ceph-mds-keyring-generator:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        ceph-mgr-keyring-generator:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        ceph-mon-keyring-generator:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        ceph-osd-keyring-generator:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    post_apply:\n      pod:\n        runAsUser: 65534\n      container:\n        ceph_mon_post_apply:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  dns_policy: \"ClusterFirstWithHostNet\"\n  replicas:\n    mgr: 2\n    mon_check: 1\n  lifecycle:\n    upgrades:\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        mon:\n          enabled: true\n          min_ready_seconds: 0\n          
max_unavailable: 1\n  updateStrategy:\n    mgr:\n      type: Recreate\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  resources:\n    enabled: false\n    mon:\n      requests:\n        memory: \"50Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"100Mi\"\n        cpu: \"500m\"\n    mgr:\n      requests:\n        memory: \"5Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"50Mi\"\n        cpu: \"500m\"\n    mon_check:\n      requests:\n        memory: \"5Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"50Mi\"\n        cpu: \"500m\"\n    jobs:\n      bootstrap:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"500m\"\n      secret_provisioning:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"500m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  tolerations:\n    mgr:\n      tolerations:\n      - effect: NoExecute\n        key: node.kubernetes.io/not-ready\n        operator: Exists\n        tolerationSeconds: 60\n      - effect: NoExecute\n        key: node.kubernetes.io/unreachable\n        operator: Exists\n        tolerationSeconds: 60\n    mon_check:\n      tolerations:\n      - effect: NoExecute\n        key: node.kubernetes.io/not-ready\n        operator: Exists\n        tolerationSeconds: 60\n      - effect: NoExecute\n        key: node.kubernetes.io/unreachable\n        operator: Exists\n        tolerationSeconds: 60\n  probes:\n    ceph:\n      ceph-mon:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 60\n            
periodSeconds: 60\n            timeoutSeconds: 5\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 360\n            periodSeconds: 180\n            timeoutSeconds: 5\n      ceph-mgr:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            timeoutSeconds: 5\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            timeoutSeconds: 5\n\nsecrets:\n  keyrings:\n    mon: ceph-mon-keyring\n    mds: ceph-bootstrap-mds-keyring\n    osd: ceph-bootstrap-osd-keyring\n    mgr: ceph-bootstrap-mgr-keyring\n    admin: ceph-client-admin-keyring\n  oci_image_registry:\n    ceph-mon: ceph-mon-oci-image-registry-key\n\nnetwork:\n  public: 192.168.0.0/16\n  cluster: 192.168.0.0/16\n\nconf:\n  features:\n    mgr: true\n  templates:\n    keyring:\n      admin: |\n        [client.admin]\n          key = {{ key }}\n          auid = 0\n          caps mds = \"allow\"\n          caps mon = \"allow *\"\n          caps osd = \"allow *\"\n          caps mgr = \"allow *\"\n      mon: |\n        [mon.]\n          key = {{ key }}\n          caps mon = \"allow *\"\n      bootstrap:\n        mds: |\n          [client.bootstrap-mds]\n            key = {{ key }}\n            caps mon = \"allow profile bootstrap-mds\"\n        mgr: |\n          [client.bootstrap-mgr]\n            key = {{ key }}\n            caps mgr = \"allow profile bootstrap-mgr\"\n        osd: |\n          [client.bootstrap-osd]\n            key = {{ key }}\n            caps mon = \"allow rw\"\n  ceph:\n    global:\n      # auth\n      cephx: true\n      cephx_require_signatures: false\n      cephx_cluster_require_signatures: true\n      cephx_service_require_signatures: false\n      objecter_inflight_op_bytes: \"1073741824\"\n      objecter_inflight_ops: 10240\n      debug_ms: \"0/0\"\n      mon_osd_down_out_interval: 1800\n      mon_osd_down_out_subtree_limit: root\n      
mon_osd_min_in_ratio: 0\n      mon_osd_min_up_ratio: 0\n      mon_data_avail_warn: 15\n      log_file: /dev/stdout\n      mon_cluster_log_file: /dev/stdout\n      # Beginning with the Pacific release, this config setting is necessary\n      # to allow pools to use 1x replication, which is disabled by default. The\n      # openstack-helm gate scripts use 1x replication for automated testing,\n      # so this is required. It doesn't seem to be sufficient to add this to\n      # /etc/ceph/ceph.conf, however. It must also be set explicitly via the\n      # 'ceph config' command, so this must also be added to the\n      # cluster_commands value in the ceph-client chart so it will be set\n      # before pools are created and configured there.\n      mon_allow_pool_size_one: true\n    osd:\n      osd_mkfs_type: xfs\n      osd_mkfs_options_xfs: -f -i size=2048\n      osd_max_object_name_len: 256\n      ms_bind_port_min: 6800\n      ms_bind_port_max: 7100\n      osd_snap_trim_priority: 1\n      osd_snap_trim_sleep: 0.1\n      osd_pg_max_concurrent_snap_trims: 1\n      filestore_merge_threshold: -10\n      filestore_split_multiple: 12\n      filestore_max_sync_interval: 10\n      osd_scrub_begin_hour: 22\n      osd_scrub_end_hour: 4\n      osd_scrub_during_recovery: false\n      osd_scrub_sleep: 0.1\n      osd_scrub_chunk_min: 1\n      osd_scrub_chunk_max: 4\n      osd_scrub_load_threshold: 10.0\n      osd_deep_scrub_stride: \"1048576\"\n      osd_scrub_priority: 1\n      osd_recovery_op_priority: 1\n      osd_recovery_max_active: 1\n      osd_mount_options_xfs: \"rw,noatime,largeio,inode64,swalloc,logbufs=8,logbsize=256k,allocsize=4M\"\n      osd_journal_size: 10240\n  storage:\n    mon:\n      directory: /var/lib/openstack-helm/ceph/mon\n\n    # The post-apply job will try to determine if mons need to be restarted\n    # and only restart them if necessary. 
Set this value to \"true\" to restart\n    # mons unconditionally.\n    unconditional_mon_restart: \"false\"\n\ndaemonset:\n  prefix_name: \"mon\"\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - ceph-mon-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    bootstrap:\n      jobs: null\n      services:\n        - endpoint: internal\n          service: ceph_mon\n    job_keyring_generator:\n      jobs: null\n    mon:\n      jobs:\n        - ceph-storage-keys-generator\n        - ceph-mon-keyring-generator\n    mgr:\n      jobs:\n        - ceph-storage-keys-generator\n        - ceph-mgr-keyring-generator\n      services:\n        - endpoint: internal\n          service: ceph_mon\n    moncheck:\n      jobs:\n        - ceph-storage-keys-generator\n        - ceph-mon-keyring-generator\n      services:\n        - endpoint: discovery\n          service: ceph_mon\n    storage_keys_generator:\n      jobs: null\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nbootstrap:\n  enabled: false\n  script: |\n    ceph -s\n    function ensure_pool () {\n      ceph osd pool stats $1 || ceph osd pool create $1 $2\n      if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then\n        ceph osd pool application enable $1 $3\n      fi\n    }\n    #ensure_pool volumes 8 cinder\n\n# Uncomment below to enable mgr modules\n# For a list of available modules:\n#  http://docs.ceph.com/docs/master/mgr/\n# This overrides mgr_initial_modules (default: restful, status)\n# Any module not listed here will be disabled\nceph_mgr_enabled_modules:\n  - restful\n  - status\n  - prometheus\n  - balancer\n  - iostat\n  - pg_autoscaler\n\n# You can configure your mgr modules\n# below. Each module has its own set\n# of key/value. Refer to the doc\n# above for more info. 
For example:\nceph_mgr_modules_config:\n#  balancer:\n#    active: 1\n#  prometheus:\n    # server_port: 9283\n#    server_addr: 0.0.0.0\n#  dashboard:\n#    port: 7000\n#  localpool:\n#    failure_domain: host\n#    subtree: rack\n#    pg_num: \"128\"\n#    num_rep: \"3\"\n#    min_size: \"2\"\n\n# if you change provision_storage_class to false\n# it is presumed you manage your own storage\n# class definition externally\n# We iterate over each storageclass parameters\n# and derive the manifest.\nstorageclass:\n  rbd:\n    parameters:\n      adminSecretName: pvc-ceph-conf-combined-storageclass\n      adminSecretNameNode: pvc-ceph-conf-combined-storageclass\n  cephfs:\n    provision_storage_class: true\n    provisioner: ceph.com/cephfs\n    metadata:\n      name: cephfs\n    parameters:\n      adminId: admin\n      userSecretName: pvc-ceph-cephfs-client-key\n      adminSecretName: pvc-ceph-conf-combined-storageclass\n      adminSecretNamespace: ceph\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      ceph-mon:\n        username: ceph-mon\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  ceph_mon:\n    namespace: null\n    hosts:\n      default: ceph-mon\n      discovery: ceph-mon-discovery\n    host_fqdn_override:\n      default: null\n    port:\n      mon:\n        default: 6789\n      mon_msgr2:\n        default: 3300\n  ceph_mgr:\n    namespace: null\n    hosts:\n      default: ceph-mgr\n    host_fqdn_override:\n      default: null\n    port:\n      
mgr:\n        default: 7000\n      metrics:\n        default: 9283\n    scheme:\n      default: http\n\nmonitoring:\n  prometheus:\n    enabled: true\n    ceph_mgr:\n      scrape: true\n      port: 9283\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  configmap_templates: true\n  daemonset_mon: true\n  deployment_mgr: true\n  deployment_mgr_sa: true\n  deployment_moncheck: true\n  job_image_repo_sync: true\n  job_bootstrap: true\n  job_keyring: true\n  job_post_apply: true\n  service_mon: true\n  service_mgr: true\n  service_mon_discovery: true\n  job_storage_admin_keys: true\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "ceph-osd/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Ceph OSD\nname: ceph-osd\nversion: 2025.2.0\nhome: https://github.com/ceph/ceph\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "ceph-osd/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "ceph-osd/templates/bin/_helm-tests.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nfunction check_osd_count() {\n  echo \"#### Start: Checking OSD count ####\"\n  noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')\n  osd_stat=$(ceph osd stat -f json-pretty)\n  num_osd=$(awk '/\"num_osds\"/{print $2}' <<< \"$osd_stat\" | cut -d, -f1)\n  num_in_osds=$(awk '/\"num_in_osds\"/{print $2}' <<< \"$osd_stat\" | cut -d, -f1)\n  num_up_osds=$(awk '/\"num_up_osds\"/{print $2}' <<< \"$osd_stat\" | cut -d, -f1)\n\n  MIN_OSDS=$((${num_osd}*$REQUIRED_PERCENT_OF_OSDS/100))\n  if [ ${MIN_OSDS} -lt 1 ]; then\n    MIN_OSDS=1\n  fi\n\n  if [ \"${noup_flag}\" ]; then\n    osd_status=$(ceph osd dump -f json | jq -c '.osds[] | .state')\n    count=0\n    for osd in $osd_status; do\n      if [[ \"$osd\" == *\"up\"* || \"$osd\" == *\"new\"* ]]; then\n        ((count=count+1))\n      fi\n    done\n    echo \"Caution: noup flag is set. ${count} OSDs in up/new state. Required number of OSDs: ${MIN_OSDS}.\"\n    ceph -s\n    exit 0\n  else\n    if [ \"${num_osd}\" -eq 0 ]; then\n      echo \"There are no osds in the cluster\"\n    elif [ \"${num_in_osds}\" -ge \"${MIN_OSDS}\" ] && [ \"${num_up_osds}\" -ge \"${MIN_OSDS}\"  ]; then\n      echo \"Required number of OSDs (${MIN_OSDS}) are UP and IN status\"\n      ceph -s\n      exit 0\n    else\n      echo \"Required number of OSDs (${MIN_OSDS}) are NOT UP and IN status. 
Cluster shows OSD count=${num_osd}, UP=${num_up_osds}, IN=${num_in_osds}\"\n    fi\n  fi\n}\n\n# in case the chart has been re-installed in order to make changes to daemonset\n# we do not need rack_by_rack restarts\n# but we need to wait until all re-installed ceph-osd pods are healthy\n# and there are no degraded objects\nwhile true; do\n  check_osd_count\n  sleep 60\ndone\n\n"
  },
  {
    "path": "ceph-osd/templates/bin/_init-dirs.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport LC_ALL=C\n: \"${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}\"\n\nmkdir -p \"$(dirname \"${OSD_BOOTSTRAP_KEYRING}\")\"\n\n# Let's create the ceph directories\nfor DIRECTORY in osd tmp crash; do\n  mkdir -p \"/var/lib/ceph/${DIRECTORY}\"\ndone\n\n# Create socket directory\nmkdir -p /run/ceph\n\n# Adjust the owner of all those directories\nchown -R ceph. /run/ceph/ /var/lib/ceph/*\n"
  },
  {
    "path": "ceph-osd/templates/bin/_post-apply.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nexport LC_ALL=C\n\n: \"${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}\"\n\nif [[ ! -f /etc/ceph/${CLUSTER}.conf ]]; then\n  echo \"ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon\"\n  exit 1\nfi\n\nif [[ ! -f ${ADMIN_KEYRING} ]]; then\n   echo \"ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon\"\n   exit 1\nfi\n\nceph --cluster ${CLUSTER}  -s\nfunction wait_for_pods() {\n  timeout=${2:-1800}\n  end=$(date -ud \"${timeout} seconds\" +%s)\n  # Selecting containers with \"ceph-osd-default\" name and\n  # counting them based on \"ready\" field.\n  count_pods=\".items | map(.status.containerStatuses | .[] | \\\n              select(.name==\\\"ceph-osd-default\\\")) | \\\n              group_by(.ready) | map({(.[0].ready | tostring): length}) | .[]\"\n  min_osds=\"add | if .true >= (.false + .true)*${REQUIRED_PERCENT_OF_OSDS}/100 \\\n           then \\\"pass\\\" else \\\"fail\\\" end\"\n  while true; do\n      # Leaving while loop if minimum amount of OSDs are ready.\n      # It allows to proceed even if some OSDs are not ready\n      # or in \"CrashLoopBackOff\" state\n      state=$(kubectl get pods --namespace=\"${1}\" -l component=osd -o json | jq \"${count_pods}\")\n      osd_state=$(jq -s \"${min_osds}\" <<< \"${state}\")\n      if [[ \"${osd_state}\" == \\\"pass\\\" ]]; then\n        break\n      fi\n      sleep 5\n\n      if [ $(date -u 
+%s) -gt $end ] ; then\n          echo -e \"Containers failed to start after $timeout seconds\\n\"\n          kubectl get pods --namespace \"${1}\" -o wide -l component=osd\n          exit 1\n      fi\n  done\n}\n\nfunction check_ds() {\n for ds in `kubectl get ds --namespace=$CEPH_NAMESPACE -l component=osd --no-headers=true|awk '{print $1}'`\n do\n   ds_query=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status`\n   if echo $ds_query |grep -i \"numberAvailable\" ;then\n     currentNumberScheduled=`echo $ds_query|jq -r .currentNumberScheduled`\n     desiredNumberScheduled=`echo $ds_query|jq -r .desiredNumberScheduled`\n     numberAvailable=`echo $ds_query|jq -r .numberAvailable`\n     numberReady=`echo $ds_query|jq -r .numberReady`\n     updatedNumberScheduled=`echo $ds_query|jq -r .updatedNumberScheduled`\n     ds_check=`echo \"$currentNumberScheduled $desiredNumberScheduled $numberAvailable $numberReady $updatedNumberScheduled\"| \\\n       tr ' ' '\\n'|sort -u|wc -l`\n     if [ $ds_check != 1 ]; then\n       echo \"some pods under daemonset $ds are not yet ready\"\n       exit\n     else\n       echo \"all pods under daemonset $ds are ready\"\n     fi\n   else\n     echo \"there are no osds under daemonset $ds\"\n   fi\n done\n}\n\nfunction wait_for_pgs () {\n  echo \"#### Start: Checking pgs ####\"\n\n  pgs_ready=0\n  pgs_inactive=0\n  query='map({state: .state}) | group_by(.state) | map({state: .[0].state, count: length}) | .[] | select(.state | contains(\"active\") | not)'\n\n  if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then\n    query=\".pg_stats | ${query}\"\n  fi\n\n  # Loop until all pgs are active\n  while [[ $pgs_ready -lt 3 ]]; do\n    pgs_state=$(ceph --cluster ${CLUSTER} pg ls -f json | jq -c \"${query}\")\n    if [[ $(jq -c '. 
| select(.state | contains(\"peering\") | not)' <<< \"${pgs_state}\") ]]; then\n      if [[ $pgs_inactive -gt 200 ]]; then\n        # If inactive PGs aren't peering after ~10 minutes, fail\n        echo \"Failure, found inactive PGs that aren't peering\"\n        exit 1\n      fi\n      (( pgs_inactive+=1 ))\n    else\n      pgs_inactive=0\n    fi\n    if [[ \"${pgs_state}\" ]]; then\n      pgs_ready=0\n    else\n      (( pgs_ready+=1 ))\n    fi\n    sleep 30\n  done\n}\n\nfunction wait_for_degraded_objects () {\n  echo \"#### Start: Checking for degraded objects ####\"\n\n  # Loop until no degraded objects\n    while [[ ! -z \"`ceph --cluster ${CLUSTER} -s | grep 'degraded'`\" ]]\n    do\n      sleep 30\n      ceph -s\n    done\n}\n\nfunction wait_for_degraded_and_misplaced_objects () {\n  echo \"#### Start: Checking for degraded and misplaced objects ####\"\n\n  # Loop until no degraded or misplaced objects\n    while [[ ! -z \"`ceph --cluster ${CLUSTER} -s | grep 'degraded\\|misplaced'`\" ]]\n    do\n      sleep 30\n      ceph -s\n    done\n}\n\nfunction restart_by_rack() {\n\n  racks=`ceph osd tree | awk '/rack/{print $4}'`\n  echo \"Racks under ceph cluster are: $racks\"\n  for rack in $racks\n  do\n     hosts_in_rack=(`ceph osd tree | sed -n \"/rack $rack/,/rack/p\" | awk '/host/{print $4}' | tr '\\n' ' '|sed 's/ *$//g'`)\n     echo \"hosts under rack \"$rack\" are: ${hosts_in_rack[@]}\"\n     echo \"hosts count under $rack are: ${#hosts_in_rack[@]}\"\n     for host in ${hosts_in_rack[@]}\n     do\n      echo \"host is : $host\"\n      if [[ ! 
-z \"$host\" ]]; then\n        pods_on_host=$(kubectl get po -n \"$CEPH_NAMESPACE\" -l component=osd -o wide |grep \"$host\"|awk '{print $1}' | tr '\\n' ' '|sed 's/ *$//g')\n        echo \"Restarting  the pods under host $host\"\n        for pod in ${pods_on_host}\n        do\n          kubectl delete  pod -n \"$CEPH_NAMESPACE\" \"${pod}\" || true\n        done\n      fi\n     done\n     echo \"waiting for the pods under host $host from restart\"\n     # The pods will not be ready in first 60 seconds. Thus we can reduce\n     # amount of queries to kubernetes.\n     sleep 60\n     # Degraded objects won't recover with noout set unless pods come back and\n     # PGs become healthy, so simply wait for 0 degraded objects\n     wait_for_degraded_objects\n     ceph -s\n  done\n}\n\nif [[ \"$DISRUPTIVE_OSD_RESTART\" != \"true\" ]]; then\n  wait_for_pods $CEPH_NAMESPACE\nfi\n\nrequire_upgrade=0\nmax_release=0\n\nfor ds in `kubectl get ds --namespace=$CEPH_NAMESPACE -l component=osd --no-headers=true|awk '{print $1}'`\ndo\n  updatedNumberScheduled=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status.updatedNumberScheduled`\n  desiredNumberScheduled=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status.desiredNumberScheduled`\n  if [[ $updatedNumberScheduled != $desiredNumberScheduled ]]; then\n    if kubectl get ds -n $CEPH_NAMESPACE  $ds -o json|jq -r .status|grep -i \"numberAvailable\" ;then\n      require_upgrade=$((require_upgrade+1))\n      _release=`kubectl get ds -n $CEPH_NAMESPACE $ds  -o json|jq -r .status.observedGeneration`\n      max_release=$(( max_release > _release ? max_release : _release ))\n    fi\n  fi\ndone\n\necho \"Latest revision of the helm chart(s) is : $max_release\"\n\n# If flags are set that will prevent recovery, don't restart OSDs\nceph -s | grep \"noup\\|noin\\|nobackfill\\|norebalance\\|norecover\" > /dev/null\nif [[ $? 
-ne 0 ]]; then\n  if [[ \"$UNCONDITIONAL_OSD_RESTART\" == \"true\" ]] || [[ $max_release -gt 1  ]]; then\n    if [[ \"$UNCONDITIONAL_OSD_RESTART\" == \"true\" ]] || [[  $require_upgrade -gt 0 ]]; then\n      if [[ \"$DISRUPTIVE_OSD_RESTART\" == \"true\" ]]; then\n        echo \"restarting all osds simultaneously\"\n        kubectl -n $CEPH_NAMESPACE delete pod -l component=osd\n        sleep 60\n        echo \"waiting for pgs to become active and for degraded objects to recover\"\n        wait_for_pgs\n        wait_for_degraded_objects\n        ceph -s\n      else\n        echo \"waiting for inactive pgs and degraded objects before upgrade\"\n        wait_for_pgs\n        wait_for_degraded_and_misplaced_objects\n        ceph -s\n        ceph osd \"set\" noout\n        echo \"lets restart the osds rack by rack\"\n        restart_by_rack\n        ceph osd \"unset\" noout\n      fi\n    fi\n\n    #lets check all the ceph-osd daemonsets\n    echo \"checking DS\"\n    check_ds\n  else\n    echo \"No revisions found for upgrade\"\n  fi\nelse\n  echo \"Skipping OSD restarts because flags are set that would prevent recovery\"\nfi\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/_check.sh.tpl",
    "content": "#!/bin/sh\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# A liveness check for ceph OSDs: exit 0 if\n# all OSDs on this host are in the \"active\" state\n# per their admin sockets.\n\nSOCKDIR=${CEPH_SOCKET_DIR:-/run/ceph}\nSBASE=${CEPH_OSD_SOCKET_BASE:-ceph-osd}\nSSUFFIX=${CEPH_SOCKET_SUFFIX:-asok}\n\n# default: no sockets, not live\ncond=1\nfor sock in $SOCKDIR/$SBASE.*.$SSUFFIX; do\n if [ -S $sock ]; then\n  OSD_ID=$(echo $sock | awk -F. '{print $2}')\n  OSD_STATE=$(ceph -f json --connect-timeout 1 --admin-daemon \"${sock}\" status|jq -r '.state')\n  echo \"OSD ${OSD_ID} ${OSD_STATE}\";\n  # Succeed if the OSD state is active (running) or preboot (starting)\n  if [ \"${OSD_STATE}\" = \"active\" ] || [ \"${OSD_STATE}\" = \"preboot\" ]; then\n   cond=0\n  else\n   # Any other state is unexpected and the probe fails\n   exit 1\n  fi\n else\n  echo \"No daemon sockets found in $SOCKDIR\"\n fi\ndone\nexit $cond\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/_config.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nsource /tmp/osd-common-ceph-volume.sh\n\n# This script will execute a series of:\n#   ceph config set <section> <key> <value>\n# for values defined in .Values.conf.ceph\n#\n# .Values.conf.ceph is expected to be a map where each key is the section name:\n# Example values structure (Helm values.yaml):\n# conf:\n#   ceph:\n#     osd:\n#       debug_osd: 10\n#     mon:\n#       debug_mon: 20\n\n# helper: run a ceph config set and log the command\nrun_ceph_set() {\n    echo \"+ ceph config set $1 $2 $3\"\n    timeout 10 ceph --name client.bootstrap-osd --keyring ${OSD_BOOTSTRAP_KEYRING} config set \"$1\" \"$2\" \"$3\"\n}\n\n# Disable exit on error to ignore config set failures\nset +e\n\n# The following block is generated by Helm templating and will expand into\n# concrete ceph config set commands. Do not edit at runtime.\n\n{{- /* Iterate sections (keys are section names). Sections must not contain nested maps. */ -}}\n{{- if .Values.conf.ceph }}\n{{- range $secName, $secValues := .Values.conf.ceph }}\necho \"Applying Ceph config for section: {{ $secName }}\"\n{{- if eq (kindOf $secValues) \"map\" }}\n  {{- range $k, $v := $secValues }}\n    {{- if eq (kindOf $v) \"map\" }}\necho \"ERROR: nested maps are not allowed in section '{{ $secName }}' for key '{{ $k }}'. Aborting.\"\nexit 1\n    {{- else }}\nrun_ceph_set {{ $secName }} {{ $k }} {{ $v | quote }}\n    {{- end }}\n  {{- end }}\n{{- else }}\necho \"ERROR: section '{{ $secName }}' is not a valid map. Aborting.\"\nexit 1\n{{- end }}\n{{- end }}\n{{- else }}\necho \"No .Values.conf.ceph defined, nothing to configure.\"\n{{- end -}}\n\nexit 0\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/_directory.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport LC_ALL=C\n\nsource /tmp/osd-common-ceph-volume.sh\n\n: \"${JOURNAL_DIR:=/var/lib/ceph/journal}\"\n\nif [[ ! -d /var/lib/ceph/osd ]]; then\n  echo \"ERROR- could not find the osd directory, did you bind mount the OSD data directory?\"\n  echo \"ERROR- use -v <host_osd_data_dir>:/var/lib/ceph/osd\"\n  exit 1\nfi\n\n# check if anything is present, if not, create an osd and its directory\nif [[ -n \"$(find /var/lib/ceph/osd -type d  -empty ! -name \"lost+found\")\" ]]; then\n  echo \"Creating osd\"\n  UUID=$(uuidgen)\n  OSD_SECRET=$(ceph-authtool --gen-print-key)\n  OSD_ID=$(echo \"{\\\"cephx_secret\\\": \\\"${OSD_SECRET}\\\"}\" | ceph osd new ${UUID} -i - -n client.bootstrap-osd -k \"$OSD_BOOTSTRAP_KEYRING\")\n\n  # test that the OSD_ID is an integer\n  if [[ \"$OSD_ID\" =~ ^-?[0-9]+$ ]]; then\n    echo \"OSD created with ID: ${OSD_ID}\"\n  else\n    echo \"OSD creation failed: ${OSD_ID}\"\n    exit 1\n  fi\n\n  OSD_PATH=\"$OSD_PATH_BASE-$OSD_ID/\"\n  if [ -n \"${JOURNAL_DIR}\" ]; then\n     OSD_JOURNAL=\"${JOURNAL_DIR}/journal.${OSD_ID}\"\n     chown -R ceph. ${JOURNAL_DIR}\n  else\n     if [ -n \"${JOURNAL}\" ]; then\n        OSD_JOURNAL=${JOURNAL}\n        chown -R ceph. $(dirname ${JOURNAL_DIR})\n     else\n        OSD_JOURNAL=${OSD_PATH%/}/journal\n     fi\n  fi\n  # create the folder and own it\n  mkdir -p \"${OSD_PATH}\"\n  echo \"created folder ${OSD_PATH}\"\n  # write the secret to the osd keyring file\n  ceph-authtool --create-keyring ${OSD_PATH%/}/keyring --name osd.${OSD_ID} --add-key ${OSD_SECRET}\n  chown -R \"${CHOWN_OPT[@]}\" ceph. \"${OSD_PATH}\"\n  OSD_KEYRING=\"${OSD_PATH%/}/keyring\"\n  # init data directory\n  ceph-osd -i ${OSD_ID} --mkfs --osd-uuid ${UUID} --mkjournal --osd-journal ${OSD_JOURNAL} --setuser ceph --setgroup ceph\n  # add the osd to the crush map\n  crush_location\nfi\n\nfor OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do\n  # NOTE(gagehugo): Writing the OSD_ID to tmp for logging\n  echo \"${OSD_ID}\" > /tmp/osd-id\n  OSD_PATH=\"$OSD_PATH_BASE-$OSD_ID/\"\n  OSD_KEYRING=\"${OSD_PATH%/}/keyring\"\n  if [ -n \"${JOURNAL_DIR}\" ]; then\n     OSD_JOURNAL=\"${JOURNAL_DIR}/journal.${OSD_ID}\"\n     chown -R ceph. ${JOURNAL_DIR}\n  else\n     if [ -n \"${JOURNAL}\" ]; then\n        OSD_JOURNAL=${JOURNAL}\n        chown -R ceph. $(dirname ${JOURNAL_DIR})\n     else\n        OSD_JOURNAL=${OSD_PATH%/}/journal\n        chown ceph. ${OSD_JOURNAL}\n     fi\n  fi\n  # log osd filesystem type\n  FS_TYPE=`stat --file-system -c \"%T\" ${OSD_PATH}`\n  echo \"OSD $OSD_PATH filesystem type: $FS_TYPE\"\n\n  # NOTE(supamatt): Just in case permissions do not align up, we recursively set them correctly.\n  if [ $(stat -c%U ${OSD_PATH}) != ceph ]; then\n    chown -R ceph. ${OSD_PATH};\n  fi\n\n  crush_location\ndone\n\n# Launch the OSD in the background and record its PID so the stop hook\n# (_stop.sh.tpl reads /run/ceph-osd.pid) can signal it; wait keeps the\n# container alive for the daemon's lifetime (same pattern as the\n# ceph-volume _block/_bluestore scripts).\nexec /usr/bin/ceph-osd \\\n    --cluster ${CLUSTER} \\\n    -f \\\n    -i ${OSD_ID} \\\n    --osd-journal ${OSD_JOURNAL} \\\n    -k ${OSD_KEYRING} \\\n    --setuser ceph \\\n    --setgroup disk & echo $! > /run/ceph-osd.pid\nwait\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/_init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\n\necho \"Configuring Ceph from Helm values\"\n/tmp/osd-config.sh\n\nset -e\n\necho \"Initializing the osd with ${DEPLOY_TOOL}\"\nexec \"/tmp/init-${DEPLOY_TOOL}.sh\"\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/_log-runner-stop.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nsource /tmp/utils-resolveLocations.sh\n\ntouch /tmp/ceph-log-runner.stop\n\n# PID files below are written by the log-tail script (_log-tail.sh.tpl).\nTAIL_PID=\"$(cat /tmp/ceph-log-runner-tail.pid)\"\nwhile kill -0 ${TAIL_PID} >/dev/null 2>&1;\ndo\n  kill -9 ${TAIL_PID};\n  sleep 1;\ndone\n\nSLEEP_PID=\"$(cat /tmp/ceph-log-runner-sleep.pid)\"\nwhile kill -0 ${SLEEP_PID} >/dev/null 2>&1;\ndo\n  kill -9 ${SLEEP_PID};\n  sleep 1;\ndone\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/_log-tail.sh.tpl",
    "content": "#!/bin/bash\nset -ex\n\nosd_id_file=\"/tmp/osd-id\"\n\nfunction wait_for_file() {\n  local file=\"$1\"; shift\n  local wait_seconds=\"${1:-30}\"; shift\n\n  until test $((wait_seconds--)) -eq 0 -o -f \"$file\" ; do\n    sleep 1\n  done\n\n  ((++wait_seconds))\n}\nwait_for_file \"${osd_id_file}\" \"${WAIT_FOR_OSD_ID_TIMEOUT}\"\n\nlog_file=\"/var/log/ceph/${DAEMON_NAME}.$(cat \"${osd_id_file}\").log\"\nwait_for_file \"${log_file}\" \"${WAIT_FOR_OSD_ID_TIMEOUT}\"\n\ntrap \"exit\" SIGTERM SIGINT\nkeep_running=true\n\nfunction tail_file () {\n  while $keep_running; do\n    tail --retry -f \"${log_file}\" &\n    tail_pid=$!\n    echo $tail_pid > /tmp/ceph-log-runner-tail.pid\n    wait $tail_pid\n    if [ -f /tmp/ceph-log-runner.stop ]; then\n      keep_running=false\n    fi\n    sleep 30\n  done\n}\n\nfunction truncate_log () {\n  while $keep_running; do\n    # Run the sleep in the background and record its PID so the stop\n    # script can kill it and interrupt the wait promptly.\n    sleep ${TRUNCATE_PERIOD} &\n    sleep_pid=$!\n    echo $sleep_pid > /tmp/ceph-log-runner-sleep.pid\n    wait $sleep_pid\n    if [[ -f ${log_file} ]] ; then\n      truncate -s \"${TRUNCATE_SIZE}\" \"${log_file}\"\n    fi\n  done\n}\n\ntail_file &\ntruncate_log &\n\nwait -n\nkeep_running=false\nwait\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/_start.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\necho \"LAUNCHING OSD: in ${STORAGE_TYPE%-*}:${STORAGE_TYPE#*-} mode\"\nexec \"/tmp/osd-${STORAGE_TYPE%-*}-${DEPLOY_TOOL}.sh\"\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/_stop.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nsource /tmp/utils-resolveLocations.sh\n\nCEPH_OSD_PID=\"$(cat /run/ceph-osd.pid)\"\nwhile kill -0 ${CEPH_OSD_PID} >/dev/null 2>&1; do\n    kill -SIGTERM ${CEPH_OSD_PID}\n    sleep 1\ndone\n\nif [ \"x${STORAGE_TYPE%-*}\" == \"xblock\" ]; then\n  OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})\n  OSD_JOURNAL=$(readlink -f ${JOURNAL_LOCATION})\n  if [ \"x${STORAGE_TYPE#*-}\" == \"xlogical\" ]; then\n    umount \"$(findmnt -S \"${OSD_DEVICE}1\" | tail -n +2 | awk '{ print $1 }')\"\n  fi\nfi\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/ceph-volume/_block.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nsource /tmp/osd-common-ceph-volume.sh\n\nset -ex\n\n: \"${OSD_SOFT_FORCE_ZAP:=1}\"\n: \"${OSD_JOURNAL_DISK:=}\"\n\nif [ \"x${STORAGE_TYPE%-*}\" == \"xdirectory\" ]; then\n  export OSD_DEVICE=\"/var/lib/ceph/osd\"\nelse\n  export OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})\nfi\n\nif [ \"x$JOURNAL_TYPE\" == \"xdirectory\" ]; then\n  export OSD_JOURNAL=\"/var/lib/ceph/journal\"\nelse\n  export OSD_JOURNAL=$(readlink -f ${JOURNAL_LOCATION})\nfi\n\nif [[ -z \"${OSD_DEVICE}\" ]];then\n  echo \"ERROR- You must provide a device to build your OSD ie: /dev/sdb\"\n  exit 1\nfi\n\nif [[ ! 
-b \"${OSD_DEVICE}\" ]]; then\n  echo \"ERROR- The device pointed by OSD_DEVICE ${OSD_DEVICE} doesn't exist !\"\n  exit 1\nfi\n\nACTIVATE_OPTIONS=\"\"\nCEPH_OSD_OPTIONS=\"\"\n\nudev_settle\n\nOSD_ID=$(ceph-volume inventory ${OSD_DEVICE} | grep \"osd id\" | awk '{print $3}')\nif [[ -z ${OSD_ID} ]]; then\n  echo \"OSD_ID not found from device ${OSD_DEVICE}\"\n  exit 1\nfi\nOSD_FSID=$(ceph-volume inventory ${OSD_DEVICE} | grep \"osd fsid\" | awk '{print $3}')\nif [[ -z ${OSD_FSID} ]]; then\n  echo \"OSD_FSID not found from device ${OSD_DEVICE}\"\n  exit 1\nfi\nOSD_PATH=\"${OSD_PATH_BASE}-${OSD_ID}\"\nOSD_KEYRING=\"${OSD_PATH}/keyring\"\n\nmkdir -p ${OSD_PATH}\n\nceph-volume lvm -v \\\n  --setuser ceph \\\n  --setgroup disk \\\n  activate ${ACTIVATE_OPTIONS} \\\n  --auto-detect-objectstore \\\n  --no-systemd ${OSD_ID} ${OSD_FSID}\n\n# NOTE(stevetaylor): Set the OSD's crush weight (use noin flag to prevent rebalancing if necessary)\nOSD_WEIGHT=$(get_osd_crush_weight_from_device ${OSD_DEVICE})\n# NOTE(supamatt): add or move the OSD's CRUSH location\ncrush_location\n\nif [ \"${OSD_BLUESTORE:-0}\" -ne 1 ]; then\n  if [ -n \"${OSD_JOURNAL}\" ]; then\n    if [ -b \"${OSD_JOURNAL}\" ]; then\n      OSD_JOURNAL_DISK=\"$(readlink -f ${OSD_PATH}/journal)\"\n      if [ -z \"${OSD_JOURNAL_DISK}\" ]; then\n        echo \"ERROR: Unable to find journal device ${OSD_JOURNAL_DISK}\"\n        exit 1\n      else\n        OSD_JOURNAL=\"${OSD_JOURNAL_DISK}\"\n        if [ -e \"${OSD_PATH}/run_mkjournal\" ]; then\n          ceph-osd -i ${OSD_ID} --mkjournal\n          rm -rf ${OSD_PATH}/run_mkjournal\n        fi\n      fi\n    fi\n    if [ \"x${JOURNAL_TYPE}\" == \"xdirectory\" ]; then\n      OSD_JOURNAL=\"${OSD_JOURNAL}/journal.${OSD_ID}\"\n      touch ${OSD_JOURNAL}\n      wait_for_file \"${OSD_JOURNAL}\"\n    else\n      if [ ! -b \"${OSD_JOURNAL}\" ]; then\n        echo \"ERROR: Unable to find journal device ${OSD_JOURNAL}\"\n        exit 1\n      else\n        chown ceph. 
\"${OSD_JOURNAL}\"\n      fi\n    fi\n  else\n    wait_for_file \"${OSD_JOURNAL}\"\n    chown ceph. \"${OSD_JOURNAL}\"\n  fi\nfi\n\n# NOTE(supamatt): Just in case permissions do not align up, we recursively set them correctly.\nif [ $(stat -c%U ${OSD_PATH}) != ceph ]; then\n  chown -R ceph. ${OSD_PATH};\nfi\n\n# NOTE(gagehugo): Writing the OSD_ID to tmp for logging\necho \"${OSD_ID}\" > /tmp/osd-id\n\nif [ \"x${JOURNAL_TYPE}\" == \"xdirectory\" ]; then\n  chown -R ceph. /var/lib/ceph/journal\n  ceph-osd \\\n    --cluster ceph \\\n    --osd-data ${OSD_PATH} \\\n    --osd-journal ${OSD_JOURNAL} \\\n    -f \\\n    -i ${OSD_ID} \\\n    --setuser ceph \\\n    --setgroup disk \\\n    --mkjournal\nfi\n\nexec /usr/bin/ceph-osd \\\n    --cluster ${CLUSTER} \\\n    ${CEPH_OSD_OPTIONS} \\\n    -f \\\n    -i ${OSD_ID} \\\n    --setuser ceph \\\n    --setgroup disk & echo $! > /run/ceph-osd.pid\nwait\n\n# Clean up resources held by the common script\ncommon_cleanup\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/ceph-volume/_bluestore.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nsource /tmp/osd-common-ceph-volume.sh\n\nset -ex\n\n: \"${OSD_SOFT_FORCE_ZAP:=1}\"\n\nexport OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})\n\nif [[ -z \"${OSD_DEVICE}\" ]];then\n  echo \"ERROR- You must provide a device to build your OSD ie: /dev/sdb\"\n  exit 1\nfi\n\nif [[ ! -b \"${OSD_DEVICE}\" ]]; then\n  echo \"ERROR- The device pointed by OSD_DEVICE ${OSD_DEVICE} doesn't exist !\"\n  exit 1\nfi\n\nACTIVATE_OPTIONS=\"\"\nCEPH_OSD_OPTIONS=\"\"\n\nudev_settle\n\nOSD_ID=$(get_osd_id_from_device ${OSD_DEVICE})\nif [[ -z ${OSD_ID} ]]; then\n  echo \"OSD_ID not found from device ${OSD_DEVICE}\"\n  exit 1\nfi\nOSD_FSID=$(get_osd_fsid_from_device ${OSD_DEVICE})\nif [[ -z ${OSD_FSID} ]]; then\n  echo \"OSD_FSID not found from device ${OSD_DEVICE}\"\n  exit 1\nfi\nOSD_PATH=\"${OSD_PATH_BASE}-${OSD_ID}\"\nOSD_KEYRING=\"${OSD_PATH}/keyring\"\n\nmkdir -p ${OSD_PATH}\n\nceph-volume lvm -v \\\n  --setuser ceph \\\n  --setgroup disk \\\n  activate ${ACTIVATE_OPTIONS} \\\n  --auto-detect-objectstore \\\n  --no-systemd ${OSD_ID} ${OSD_FSID}\n# Cross check the db and wal symlinks if missed\nDB_DEV=$(get_osd_db_device_from_device ${OSD_DEVICE})\nif [[ ! -z ${DB_DEV} ]]; then\n  if [[ ! 
-h /var/lib/ceph/osd/ceph-${OSD_ID}/block.db ]]; then\n    ln -snf ${DB_DEV} /var/lib/ceph/osd/ceph-${OSD_ID}/block.db\n    chown -h ceph:ceph ${DB_DEV}\n    chown -h ceph:ceph /var/lib/ceph/osd/ceph-${OSD_ID}/block.db\n  fi\nfi\nWAL_DEV=$(get_osd_wal_device_from_device ${OSD_DEVICE})\nif [[ ! -z ${WAL_DEV} ]]; then\n  if [[ ! -h /var/lib/ceph/osd/ceph-${OSD_ID}/block.wal ]]; then\n    ln -snf ${WAL_DEV} /var/lib/ceph/osd/ceph-${OSD_ID}/block.wal\n    chown -h ceph:ceph ${WAL_DEV}\n    chown -h ceph:ceph /var/lib/ceph/osd/ceph-${OSD_ID}/block.wal\n  fi\nfi\n\n# NOTE(stevetaylor): Set the OSD's crush weight (use noin flag to prevent rebalancing if necessary)\nOSD_WEIGHT=$(get_osd_crush_weight_from_device ${OSD_DEVICE})\n# NOTE(supamatt): add or move the OSD's CRUSH location\ncrush_location\n\n\n# NOTE(supamatt): Just in case permissions do not align up, we recursively set them correctly.\nif [ $(stat -c%U ${OSD_PATH}) != ceph ]; then\n  chown -R ceph. ${OSD_PATH};\nfi\n\n# NOTE(gagehugo): Writing the OSD_ID to tmp for logging\necho \"${OSD_ID}\" > /tmp/osd-id\n\nexec /usr/bin/ceph-osd \\\n    --cluster ${CLUSTER} \\\n    ${CEPH_OSD_OPTIONS} \\\n    -f \\\n    -i ${OSD_ID} \\\n    --setuser ceph \\\n    --setgroup disk & echo $! > /run/ceph-osd.pid\nwait\n\n# Clean up resources held by the common script\ncommon_cleanup\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/ceph-volume/_common.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nshopt -s expand_aliases\nexport lock_fd=''\nexport ALREADY_LOCKED=0\nexport PS4='+${BASH_SOURCE:+$(basename ${BASH_SOURCE}):${LINENO}:}${FUNCNAME:+${FUNCNAME}():} '\n\nsource /tmp/utils-resolveLocations.sh\n\n: \"${CRUSH_LOCATION:=root=default host=${HOSTNAME}}\"\n: \"${OSD_PATH_BASE:=/var/lib/ceph/osd/${CLUSTER}}\"\n: \"${CEPH_CONF:=\"/etc/ceph/${CLUSTER}.conf\"}\"\n: \"${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}\"\n: \"${OSD_JOURNAL_UUID:=$(uuidgen)}\"\n: \"${OSD_JOURNAL_SIZE:=$(awk '/^osd_journal_size/{print $3}' ${CEPH_CONF}.template)}\"\n: \"${OSD_WEIGHT:=1.0}\"\n\n{{ include \"helm-toolkit.snippets.mon_host_from_k8s_ep\" . }}\n\n# Obtain a global lock on /var/lib/ceph/tmp/init-osd.lock\nfunction lock() {\n  # Open a file descriptor for the lock file if there isn't one already\n  if [[ -z \"${lock_fd}\" ]]; then\n    exec {lock_fd}>/var/lib/ceph/tmp/init-osd.lock || exit 1\n  fi\n  flock -w 600 \"${lock_fd}\" &> /dev/null\n  ALREADY_LOCKED=1\n}\n\n# Release the global lock on /var/lib/ceph/tmp/init-osd.lock\nfunction unlock() {\n  flock -u \"${lock_fd}\" &> /dev/null\n  ALREADY_LOCKED=0\n}\n\n# \"Destructor\" for common.sh, must be called by scripts that source this one\nfunction common_cleanup() {\n  # Close the file descriptor for the lock file\n  if [[ ! 
-z \"${lock_fd}\" ]]; then\n    if [[ ${ALREADY_LOCKED} -ne 0 ]]; then\n      unlock\n    fi\n    eval \"exec ${lock_fd}>&-\"\n  fi\n}\n\n# Run a command within the global synchronization lock\nfunction locked() {\n  # Don't log every command inside locked() to keep logs cleaner\n  { set +x; } 2>/dev/null\n\n  local LOCK_SCOPE=0\n\n  # Allow locks to be re-entrant to avoid deadlocks\n  if [[ ${ALREADY_LOCKED} -eq 0 ]]; then\n    lock\n    LOCK_SCOPE=1\n  fi\n\n  # Execute the synchronized command\n  set -x\n  \"$@\"\n  { set +x; } 2>/dev/null\n\n  # Only unlock if the lock was obtained in this scope\n  if [[ ${LOCK_SCOPE} -ne 0 ]]; then\n    unlock\n  fi\n\n  # Re-enable command logging\n  set -x\n}\n\n# Alias commands that interact with disks so they are always synchronized\nalias dmsetup='locked dmsetup'\nalias pvs='locked pvs'\nalias vgs='locked vgs'\nalias lvs='locked lvs'\nalias pvdisplay='locked pvdisplay'\nalias vgdisplay='locked vgdisplay'\nalias lvdisplay='locked lvdisplay'\nalias pvcreate='locked pvcreate'\nalias vgcreate='locked vgcreate'\nalias lvcreate='locked lvcreate'\nalias pvremove='locked pvremove'\nalias vgremove='locked vgremove'\nalias lvremove='locked lvremove'\nalias pvrename='locked pvrename'\nalias vgrename='locked vgrename'\nalias lvrename='locked lvrename'\nalias pvchange='locked pvchange'\nalias vgchange='locked vgchange'\nalias lvchange='locked lvchange'\nalias pvscan='locked pvscan'\nalias vgscan='locked vgscan'\nalias lvscan='locked lvscan'\nalias lvm_scan='locked lvm_scan'\nalias partprobe='locked partprobe'\nalias ceph-volume='locked ceph-volume'\nalias disk_zap='locked disk_zap'\nalias zap_extra_partitions='locked zap_extra_partitions'\nalias udev_settle='locked udev_settle'\nalias wipefs='locked wipefs'\nalias sgdisk='locked sgdisk'\nalias dd='locked dd'\n\neval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data[\"failure_domain\"]))')\neval 
CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data[\"failure_domain_name\"]))')\neval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data[\"failure_domain_by_hostname\"]))')\neval CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP=$(cat /etc/ceph/storage.json | jq '.failure_domain_by_hostname_map.\"'$HOSTNAME'\"')\neval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data[\"device_class\"]))')\n\nif [[ $(ceph -v | awk '/version/{print $3}' | cut -d. -f1) -lt 12 ]]; then\n    echo \"ERROR - The minimum Ceph version supported is Luminous 12.x.x\"\n    exit 1\nfi\n\nif [ -z \"${HOSTNAME}\" ]; then\n  echo \"HOSTNAME not set; This will prevent to add an OSD into the CRUSH map\"\n  exit 1\nfi\n\nif [[ ! -e ${CEPH_CONF}.template ]]; then\n  echo \"ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon\"\n  exit 1\nelse\n  ENDPOINT=$(mon_host_from_k8s_ep \"${NAMESPACE}\" ceph-mon-discovery)\n  if [[ -z \"${ENDPOINT}\" ]]; then\n    /bin/sh -c -e \"cat ${CEPH_CONF}.template | tee ${CEPH_CONF}\" || true\n  else\n    /bin/sh -c -e \"cat ${CEPH_CONF}.template | sed 's#mon_host.*#mon_host = ${ENDPOINT}#g' | tee ${CEPH_CONF}\" || true\n  fi\nfi\n\n# Wait for a file to exist, regardless of the type\nfunction wait_for_file {\n  timeout 10 bash -c \"while [ ! 
-e ${1} ]; do echo 'Waiting for ${1} to show up' && sleep 1 ; done\"\n}\n\nfunction is_available {\n  command -v $@ &>/dev/null\n}\n\nfunction ceph_cmd_retry() {\n  cnt=0\n  until \"ceph\" \"$@\" || [ $cnt -ge 6 ]; do\n    sleep 10\n    ((cnt++))\n  done\n}\n\nfunction crush_create_or_move {\n  local crush_location=${1}\n  ceph_cmd_retry --cluster \"${CLUSTER}\" --name=\"osd.${OSD_ID}\" --keyring=\"${OSD_KEYRING}\" \\\n    osd crush create-or-move -- \"${OSD_ID}\" \"${OSD_WEIGHT}\" ${crush_location}\n}\n\nfunction crush_add_and_move {\n  local crush_failure_domain_type=${1}\n  local crush_failure_domain_name=${2}\n  local crush_location=$(echo \"root=default ${crush_failure_domain_type}=${crush_failure_domain_name} host=${HOSTNAME}\")\n  crush_create_or_move \"${crush_location}\"\n  local crush_failure_domain_location_check=$(ceph_cmd_retry --cluster \"${CLUSTER}\" --name=\"osd.${OSD_ID}\" --keyring=\"${OSD_KEYRING}\" osd find ${OSD_ID} | grep \"${crush_failure_domain_type}\" | awk -F '\"' '{print $4}')\n  if [ \"x${crush_failure_domain_location_check}\" != \"x${crush_failure_domain_name}\" ];  then\n    # NOTE(supamatt): Manually move the buckets for previously configured CRUSH configurations\n    # as create-or-move may not appropiately move them.\n    ceph_cmd_retry --cluster \"${CLUSTER}\" --name=\"osd.${OSD_ID}\" --keyring=\"${OSD_KEYRING}\" \\\n      osd crush add-bucket \"${crush_failure_domain_name}\" \"${crush_failure_domain_type}\" || true\n    ceph_cmd_retry --cluster \"${CLUSTER}\" --name=\"osd.${OSD_ID}\" --keyring=\"${OSD_KEYRING}\" \\\n      osd crush move \"${crush_failure_domain_name}\" root=default || true\n    ceph_cmd_retry --cluster \"${CLUSTER}\" --name=\"osd.${OSD_ID}\" --keyring=\"${OSD_KEYRING}\" \\\n      osd crush move \"${HOSTNAME}\" \"${crush_failure_domain_type}=${crush_failure_domain_name}\" || true\n  fi\n}\n\nfunction crush_location {\n  set_device_class\n  if [ \"x${CRUSH_FAILURE_DOMAIN_TYPE}\" != \"xhost\" ]; then\n\n    echo 
\"Lets check this host is registered in k8s\"\n    if kubectl get node  ${HOSTNAME}; then\n      CRUSH_FAILURE_DOMAIN_NAME_FROM_NODE_LABEL=$(kubectl get node  ${HOSTNAME} -o json| jq -r '.metadata.labels.rack')\n    else\n      echo \"It seems there is some issue with setting the hostname on this node hence we didnt found this node in k8s\"\n      kubectl get nodes\n      echo ${HOSTNAME}\n      exit 1\n    fi\n\n    if [ ${CRUSH_FAILURE_DOMAIN_NAME_FROM_NODE_LABEL} != \"null\" ]; then\n      CRUSH_FAILURE_DOMAIN_NAME=${CRUSH_FAILURE_DOMAIN_NAME_FROM_NODE_LABEL}\n    fi\n\n    if [ \"x${CRUSH_FAILURE_DOMAIN_NAME}\" != \"xfalse\" ]; then\n      crush_add_and_move \"${CRUSH_FAILURE_DOMAIN_TYPE}\" \"${CRUSH_FAILURE_DOMAIN_NAME}\"\n    elif [ \"x${CRUSH_FAILURE_DOMAIN_BY_HOSTNAME}\" != \"xfalse\" ]; then\n      crush_add_and_move \"${CRUSH_FAILURE_DOMAIN_TYPE}\" \"$(echo ${CRUSH_FAILURE_DOMAIN_TYPE}_$(echo ${HOSTNAME} | cut -c ${CRUSH_FAILURE_DOMAIN_BY_HOSTNAME}))\"\n    elif [ \"x${CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP}\" != \"xnull\" ]; then\n      crush_add_and_move \"${CRUSH_FAILURE_DOMAIN_TYPE}\" \"${CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP}\"\n    else\n      # NOTE(supamatt): neither variables are defined then we fall back to default behavior\n      crush_create_or_move \"${CRUSH_LOCATION}\"\n    fi\n  else\n    crush_create_or_move \"${CRUSH_LOCATION}\"\n  fi\n}\n\n# Calculate proper device names, given a device and partition number\nfunction dev_part {\n  local osd_device=${1}\n  local osd_partition=${2}\n\n  if [[ -L ${osd_device} ]]; then\n    # This device is a symlink. 
Work out it's actual device\n    local actual_device=$(readlink -f \"${osd_device}\")\n    local bn=$(basename \"${osd_device}\")\n    if [[ \"${actual_device:0-1:1}\" == [0-9] ]]; then\n      local desired_partition=\"${actual_device}p${osd_partition}\"\n    else\n      local desired_partition=\"${actual_device}${osd_partition}\"\n    fi\n    # Now search for a symlink in the directory of $osd_device\n    # that has the correct desired partition, and the longest\n    # shared prefix with the original symlink\n    local symdir=$(dirname \"${osd_device}\")\n    local link=\"\"\n    local pfxlen=0\n    for option in ${symdir}/*; do\n      [[ -e $option ]] || break\n      if [[ $(readlink -f \"${option}\") == \"${desired_partition}\" ]]; then\n        local optprefixlen=$(prefix_length \"${option}\" \"${bn}\")\n        if [[ ${optprefixlen} > ${pfxlen} ]]; then\n          link=${symdir}/${option}\n          pfxlen=${optprefixlen}\n        fi\n      fi\n    done\n    if [[ $pfxlen -eq 0 ]]; then\n      >&2 echo \"Could not locate appropriate symlink for partition ${osd_partition} of ${osd_device}\"\n      exit 1\n    fi\n    echo \"$link\"\n  elif [[ \"${osd_device:0-1:1}\" == [0-9] ]]; then\n    echo \"${osd_device}p${osd_partition}\"\n  else\n    echo \"${osd_device}${osd_partition}\"\n  fi\n}\n\nfunction zap_extra_partitions {\n  # Examine temp mount and delete any block.db and block.wal partitions\n  mountpoint=${1}\n  journal_disk=\"\"\n  journal_part=\"\"\n  block_db_disk=\"\"\n  block_db_part=\"\"\n  block_wal_disk=\"\"\n  block_wal_part=\"\"\n\n  # Discover journal, block.db, and block.wal partitions first before deleting anything\n  # If the partitions are on the same disk, deleting one can affect discovery of the other(s)\n  if [ -L \"${mountpoint}/journal\" ]; then\n    journal_disk=$(readlink -m ${mountpoint}/journal | sed 's/[0-9]*//g')\n    journal_part=$(readlink -m ${mountpoint}/journal | sed 's/[^0-9]*//g')\n  fi\n  if [ -L \"${mountpoint}/block.db\" 
]; then\n    block_db_disk=$(readlink -m ${mountpoint}/block.db | sed 's/[0-9]*//g')\n    block_db_part=$(readlink -m ${mountpoint}/block.db | sed 's/[^0-9]*//g')\n  fi\n  if [ -L \"${mountpoint}/block.wal\" ]; then\n    block_wal_disk=$(readlink -m ${mountpoint}/block.wal | sed 's/[0-9]*//g')\n    block_wal_part=$(readlink -m ${mountpoint}/block.wal | sed 's/[^0-9]*//g')\n  fi\n\n  # Delete any discovered journal, block.db, and block.wal partitions\n  if [ ! -z \"${journal_disk}\" ]; then\n    sgdisk -d ${journal_part} ${journal_disk}\n    /usr/bin/flock -s ${journal_disk} /sbin/partprobe ${journal_disk}\n  fi\n  if [ ! -z \"${block_db_disk}\" ]; then\n    sgdisk -d ${block_db_part} ${block_db_disk}\n    /usr/bin/flock -s ${block_db_disk} /sbin/partprobe ${block_db_disk}\n  fi\n  if [ ! -z \"${block_wal_disk}\" ]; then\n    sgdisk -d ${block_wal_part} ${block_wal_disk}\n    /usr/bin/flock -s ${block_wal_disk} /sbin/partprobe ${block_wal_disk}\n  fi\n}\n\nfunction disk_zap {\n  # Run all the commands to clear a disk\n  local device=${1}\n  local dm_devices=$(get_dm_devices_from_osd_device \"${device}\" | xargs)\n  for dm_device in ${dm_devices}; do\n    if [[ \"$(dmsetup ls | grep ${dm_device})\" ]]; then\n      dmsetup remove ${dm_device}\n    fi\n  done\n  local logical_volumes=$(get_lv_paths_from_osd_device \"${device}\" | xargs)\n  if [[ \"${logical_volumes}\" ]]; then\n    lvremove -y ${logical_volumes}\n  fi\n  local volume_group=$(pvdisplay -ddd -v ${device} | grep \"VG Name\" | awk '/ceph/{print $3}' | grep \"ceph\")\n  if [[ ${volume_group} ]]; then\n    vgremove -y ${volume_group}\n    pvremove -y ${device}\n    ceph-volume lvm zap ${device} --destroy\n  fi\n  wipefs --all ${device}\n  sgdisk --zap-all -- ${device}\n  # Wipe the first 200MB boundary, as Bluestore redeployments will not work otherwise\n  dd if=/dev/zero of=${device} bs=1M count=200\n}\n\n# This should be run atomically to prevent unexpected cache states\nfunction lvm_scan {\n  pvscan 
--cache\n  vgscan --cache\n  lvscan --cache\n  pvscan\n  vgscan\n  lvscan\n}\n\nfunction wait_for_device {\n  local device=\"$1\"\n\n  echo \"Waiting for block device ${device} to appear\"\n  for countdown in {1..600}; do\n    test -b \"${device}\" && break\n    sleep 1\n  done\n  test -b \"${device}\" || exit 1\n}\n\nfunction udev_settle {\n  osd_devices=\"${OSD_DEVICE}\"\n  partprobe \"${OSD_DEVICE}\"\n  lvm_scan\n  if [ \"${OSD_BLUESTORE:-0}\" -eq 1 ]; then\n    if [ ! -z \"$BLOCK_DB\" ]; then\n      osd_devices=\"${osd_devices}\\|${BLOCK_DB}\"\n      # BLOCK_DB could be a physical or logical device here\n      local block_db=\"$BLOCK_DB\"\n      local db_vg=\"$(echo $block_db | cut -d'/' -f1)\"\n      if [ ! -z \"$db_vg\" ]; then\n        block_db=$(pvdisplay -ddd -v | grep -B1 \"$db_vg\" | awk '/PV Name/{print $3}')\n      fi\n      partprobe \"${block_db}\"\n    fi\n    if [ ! -z \"$BLOCK_WAL\" ] && [ \"$BLOCK_WAL\" != \"$BLOCK_DB\" ]; then\n      osd_devices=\"${osd_devices}\\|${BLOCK_WAL}\"\n      # BLOCK_WAL could be a physical or logical device here\n      local block_wal=\"$BLOCK_WAL\"\n      local wal_vg=\"$(echo $block_wal | cut -d'/' -f1)\"\n      if [ ! -z \"$wal_vg\" ]; then\n        block_wal=$(pvdisplay -ddd -v | grep -B1 \"$wal_vg\" | awk '/PV Name/{print $3}')\n      fi\n      partprobe \"${block_wal}\"\n    fi\n  else\n    if [ \"x$JOURNAL_TYPE\" == \"xblock-logical\" ] && [ ! -z \"$OSD_JOURNAL\" ]; then\n      OSD_JOURNAL=$(readlink -f ${OSD_JOURNAL})\n      if [ ! 
-z \"$OSD_JOURNAL\" ]; then\n        local JDEV=$(echo ${OSD_JOURNAL} | sed 's/[0-9]//g')\n        osd_devices=\"${osd_devices}\\|${JDEV}\"\n        partprobe \"${JDEV}\"\n        wait_for_device \"${JDEV}\"\n      fi\n    fi\n  fi\n\n  # On occassion udev may not make the correct device symlinks for Ceph, just in case we make them manually\n  mkdir -p /dev/disk/by-partuuid\n  for dev in $(awk '!/rbd/{print $4}' /proc/partitions | grep \"${osd_devices}\" | grep \"[0-9]\"); do\n    diskdev=$(echo \"${dev//[!a-z]/}\")\n    partnum=$(echo \"${dev//[!0-9]/}\")\n    symlink=\"/dev/disk/by-partuuid/$(sgdisk -i ${partnum} /dev/${diskdev} | awk '/Partition unique GUID/{print tolower($4)}')\"\n    if [ ! -e \"${symlink}\" ]; then\n      ln -s \"../../${dev}\" \"${symlink}\"\n    fi\n  done\n}\n\n# Helper function to get a logical volume from a physical volume\nfunction get_lv_from_device {\n  device=\"$1\"\n\n  pvdisplay -ddd -v -m ${device} | awk '/Logical volume/{print $3}'\n}\n\n# Helper function to get an lvm tag from a logical volume\nfunction get_lvm_tag_from_volume {\n  logical_volume=\"$1\"\n  tag=\"$2\"\n\n  if [[ \"$#\" -lt 2 ]] || [[ -z \"${logical_volume}\" ]]; then\n    # Return an empty string if the logical volume doesn't exist\n    echo\n  else\n    # Get and return the specified tag from the logical volume\n    lvs -o lv_tags ${logical_volume} | tr ',' '\\n' | grep ${tag} | cut -d'=' -f2\n  fi\n}\n\n# Helper function to get an lvm tag from a physical device\nfunction get_lvm_tag_from_device {\n  device=\"$1\"\n  tag=\"$2\"\n  # Attempt to get a logical volume for the physical device\n  logical_volume=\"$(get_lv_from_device ${device})\"\n\n  # Use get_lvm_tag_from_volume to get the specified tag from the logical volume\n  get_lvm_tag_from_volume ${logical_volume} ${tag}\n}\n\n# Helper function to get the size of a logical volume\nfunction get_lv_size_from_device {\n  device=\"$1\"\n  logical_volume=\"$(get_lv_from_device ${device})\"\n\n  lvs 
${logical_volume} -o LV_SIZE --noheadings --units k --nosuffix | xargs | cut -d'.' -f1\n}\n\n# Helper function to get the crush weight for an osd device\nfunction get_osd_crush_weight_from_device {\n  device=\"$1\"\n  lv_size=\"$(get_lv_size_from_device ${device})\" # KiB\n\n  if [[ ! -z \"${BLOCK_DB_SIZE}\" ]]; then\n    db_size=$(echo \"${BLOCK_DB_SIZE}\" | cut -d'B' -f1 | numfmt --from=iec | awk '{print $1/1024}') # KiB\n    lv_size=$((lv_size+db_size)) # KiB\n  fi\n\n  echo ${lv_size} | awk '{printf(\"%.2f\\n\", $1/1073741824)}' # KiB to TiB\n}\n\n# Helper function to get a cluster FSID from a physical device\nfunction get_cluster_fsid_from_device {\n  device=\"$1\"\n\n  # Use get_lvm_tag_from_device to get the cluster FSID from the device\n  get_lvm_tag_from_device ${device} ceph.cluster_fsid\n}\n\n# Helper function to get an OSD ID from a logical volume\nfunction get_osd_id_from_volume {\n  logical_volume=\"$1\"\n\n  # Use get_lvm_tag_from_volume to get the OSD ID from the logical volume\n  get_lvm_tag_from_volume ${logical_volume} ceph.osd_id\n}\n\n# Helper function get an OSD ID from a physical device\nfunction get_osd_id_from_device {\n  device=\"$1\"\n\n  # Use get_lvm_tag_from_device to get the OSD ID from the device\n  get_lvm_tag_from_device ${device} ceph.osd_id\n}\n\n# Helper function get an OSD FSID from a physical device\nfunction get_osd_fsid_from_device {\n  device=\"$1\"\n\n  # Use get_lvm_tag_from_device to get the OSD FSID from the device\n  get_lvm_tag_from_device ${device} ceph.osd_fsid\n}\n\n# Helper function get an OSD DB device from a physical device\nfunction get_osd_db_device_from_device {\n  device=\"$1\"\n\n  # Use get_lvm_tag_from_device to get the OSD DB device from the device\n  get_lvm_tag_from_device ${device} ceph.db_device\n}\n\n# Helper function get an OSD WAL device from a physical device\nfunction get_osd_wal_device_from_device {\n  device=\"$1\"\n\n  # Use get_lvm_tag_from_device to get the OSD WAL device from the device\n  
get_lvm_tag_from_device ${device} ceph.wal_device\n}\n\nfunction get_block_uuid_from_device {\n  device=\"$1\"\n\n  get_lvm_tag_from_device ${device} ceph.block_uuid\n}\n\nfunction get_dm_devices_from_osd_device {\n  device=\"$1\"\n  pv_uuid=$(pvdisplay -ddd -v ${device} | awk '/PV UUID/{print $3}')\n\n  # Return the list of dm devices that belong to the osd\n  if [[ \"${pv_uuid}\" ]]; then\n    dmsetup ls | grep \"$(echo \"${pv_uuid}\" | sed 's/-/--/g')\" | awk '{print $1}'\n  fi\n}\n\nfunction get_lv_paths_from_osd_device {\n  device=\"$1\"\n  pv_uuid=$(pvdisplay -ddd -v ${device} | awk '/PV UUID/{print $3}')\n\n  # Return the list of lvs that belong to the osd\n  if [[ \"${pv_uuid}\" ]]; then\n    lvdisplay | grep \"LV Path\" | grep \"${pv_uuid}\" | awk '{print $3}'\n  fi\n}\n\nfunction get_vg_name_from_device {\n  device=\"$1\"\n  pv_uuid=$(pvdisplay -ddd -v ${device} | awk '/PV UUID/{print $3}')\n\n  if [[ \"${pv_uuid}\" ]]; then\n    echo \"ceph-vg-${pv_uuid}\"\n  fi\n}\n\nfunction get_lv_name_from_device {\n  device=\"$1\"\n  device_type=\"$2\"\n  pv_uuid=$(pvdisplay -ddd -v ${device} | awk '/PV UUID/{print $3}')\n\n  if [[ \"${pv_uuid}\" ]]; then\n    echo \"ceph-${device_type}-${pv_uuid}\"\n  fi\n}\n\nfunction set_device_class {\n  if [ ! -z \"$DEVICE_CLASS\" ]; then\n    if [ \"x$DEVICE_CLASS\" != \"x$(get_device_class)\" ]; then\n      ceph_cmd_retry --cluster \"${CLUSTER}\" --name=\"osd.${OSD_ID}\" --keyring=\"${OSD_KEYRING}\" \\\n        osd crush rm-device-class \"osd.${OSD_ID}\"\n      ceph_cmd_retry --cluster \"${CLUSTER}\" --name=\"osd.${OSD_ID}\" --keyring=\"${OSD_KEYRING}\" \\\n        osd crush set-device-class \"${DEVICE_CLASS}\" \"osd.${OSD_ID}\"\n    fi\n  fi\n}\n\nfunction get_device_class {\n  echo $(ceph_cmd_retry --cluster \"${CLUSTER}\" --name=\"osd.${OSD_ID}\" --keyring=\"${OSD_KEYRING}\" \\\n    osd crush get-device-class \"osd.${OSD_ID}\")\n}\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/ceph-volume/_init-ceph-volume-helper-block-logical.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n# We do not want to zap journal disk. Tracking this option seperatly.\n: \"${JOURNAL_FORCE_ZAP:=0}\"\n\nexport OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})\nexport OSD_BLUESTORE=0\n\nif [ \"x$JOURNAL_TYPE\" == \"xdirectory\" ]; then\n  export OSD_JOURNAL=\"/var/lib/ceph/journal\"\nelse\n  export OSD_JOURNAL=$(readlink -f ${JOURNAL_LOCATION})\nfi\n\n# Check OSD FSID and journalling metadata\n# Returns 1 if the disk should be zapped; 0 otherwise.\nfunction check_osd_metadata {\n  local ceph_fsid=$1\n  retcode=0\n  local tmpmnt=$(mktemp -d)\n  mount ${DM_DEV} ${tmpmnt}\n\n  if [ \"x${JOURNAL_TYPE}\" != \"xdirectory\" ]; then\n    if [  -f \"${tmpmnt}/whoami\" ]; then\n      OSD_JOURNAL_DISK=$(readlink -f \"${tmpmnt}/journal\")\n      local osd_id=$(cat \"${tmpmnt}/whoami\")\n      if [ ! 
-b \"${OSD_JOURNAL_DISK}\" ]; then\n        OSD_JOURNAL=$(readlink -f ${OSD_JOURNAL})\n        local jdev=$(echo ${OSD_JOURNAL} | sed 's/[0-9]//g')\n        if [ ${jdev} == ${OSD_JOURNAL} ]; then\n          echo \"OSD Init: It appears that ${OSD_DEVICE} is missing the journal at ${OSD_JOURNAL}.\"\n          echo \"OSD Init: Because OSD_FORCE_REPAIR is set, we will wipe the metadata of the OSD and zap it.\"\n          rm -rf ${tmpmnt}/ceph_fsid\n        else\n          echo \"OSD Init: It appears that ${OSD_DEVICE} is missing the journal at ${OSD_JOURNAL_DISK}.\"\n          echo \"OSD Init: Because OSD_FORCE_REPAIR is set and paritions are manually defined, we will\"\n          echo \"OSD Init: attempt to recreate the missing journal device partitions.\"\n          osd_journal_create ${OSD_JOURNAL}\n          ln -sf /dev/disk/by-partuuid/${OSD_JOURNAL_UUID} ${tmpmnt}/journal\n          echo ${OSD_JOURNAL_UUID} | tee ${tmpmnt}/journal_uuid\n          chown ceph. ${OSD_JOURNAL}\n          # During OSD start we will format the journal and set the fsid\n          touch ${tmpmnt}/run_mkjournal\n        fi\n      fi\n    else\n      echo \"OSD Init: It looks like ${OSD_DEVICE} has a ceph data partition but is missing it's metadata.\"\n      echo \"OSD Init: The device may contain inconsistent metadata or be corrupted.\"\n      echo \"OSD Init: Because OSD_FORCE_REPAIR is set, we will wipe the metadata of the OSD and zap it.\"\n      rm -rf ${tmpmnt}/ceph_fsid\n    fi\n  fi\n\n  if [ -f \"${tmpmnt}/ceph_fsid\" ]; then\n    local osd_fsid=$(cat \"${tmpmnt}/ceph_fsid\")\n\n    if [ ${osd_fsid} != ${ceph_fsid} ]; then\n      echo \"OSD Init: ${OSD_DEVICE} is an OSD belonging to a different (or old) ceph cluster.\"\n      echo \"OSD Init: The OSD FSID is ${osd_fsid} while this cluster is ${ceph_fsid}\"\n      echo \"OSD Init: Because OSD_FORCE_REPAIR was set, we will zap this device.\"\n      ZAP_EXTRA_PARTITIONS=${tmpmnt}\n      retcode=1\n    else\n      echo \"It looks like 
${OSD_DEVICE} is an OSD belonging to a this ceph cluster.\"\n      echo \"OSD_FORCE_REPAIR is set, but will be ignored and the device will not be zapped.\"\n      echo \"Moving on, trying to activate the OSD now.\"\n    fi\n  else\n    echo \"OSD Init: ${OSD_DEVICE} has a ceph data partition but no FSID.\"\n    echo \"OSD Init: Because OSD_FORCE_REPAIR was set, we will zap this device.\"\n    ZAP_EXTRA_PARTITIONS=${tmpmnt}\n    retcode=1\n  fi\n  umount ${tmpmnt}\n  return ${retcode}\n}\n\nfunction determine_what_needs_zapping {\n\n  if [[ ! -z ${OSD_ID} ]]; then\n    local dm_num=$(dmsetup ls | grep $(lsblk -J ${OSD_DEVICE} | jq -r '.blockdevices[].children[].name') | awk '{print $2}' | cut -d':' -f2 | cut -d')' -f1)\n    DM_DEV=\"/dev/dm-\"${dm_num}\n  elif [[ $(sgdisk --print ${OSD_DEVICE} | grep \"F800\") ]]; then\n    # Ceph-disk was used to initialize the disk, but this is not supported\n    echo \"OSD Init: ceph-disk was used to initialize the disk, but this is no longer supported\"\n    exit 1\n  else\n    if [[ ${OSD_FORCE_REPAIR} -eq 1 ]]; then\n      echo \"OSD Init: It looks like ${OSD_DEVICE} isn't consistent, however OSD_FORCE_REPAIR is enabled so we are zapping the device anyway\"\n      ZAP_DEVICE=1\n    else\n      echo \"OSD Init: Regarding parted, device ${OSD_DEVICE} is inconsistent/broken/weird.\"\n      echo \"OSD Init: It would be too dangerous to destroy it without any notification.\"\n      echo \"OSD Init: Please set OSD_FORCE_REPAIR to '1' if you really want to zap this disk.\"\n      exit 1\n    fi\n  fi\n\n  if [ ${OSD_FORCE_REPAIR} -eq 1 ] && [ ! -z ${DM_DEV} ]; then\n    if [ -b ${DM_DEV} ]; then\n      local ceph_fsid=$(ceph-conf --lookup fsid)\n      if [ ! 
-z \"${ceph_fsid}\"  ]; then\n        # Check the OSD metadata and zap the disk if necessary\n        if [[ $(check_osd_metadata ${ceph_fsid}) -eq 1 ]]; then\n          echo \"OSD Init: ${OSD_DEVICE} needs to be zapped...\"\n          ZAP_DEVICE=1\n        fi\n      else\n        echo \"Unable to determine the FSID of the current cluster.\"\n        echo \"OSD_FORCE_REPAIR is set, but this OSD will not be zapped.\"\n        echo \"Moving on, trying to activate the OSD now.\"\n      fi\n    else\n      echo \"parted says ${DM_DEV} should exist, but we do not see it.\"\n      echo \"We will ignore OSD_FORCE_REPAIR and try to use the device as-is\"\n      echo \"Moving on, trying to activate the OSD now.\"\n    fi\n  else\n    echo \"INFO- It looks like ${OSD_DEVICE} is an OSD LVM\"\n    echo \"Moving on, trying to prepare and activate the OSD LVM now.\"\n  fi\n}\n\nfunction osd_journal_create {\n  local osd_journal=${1}\n  local osd_journal_partition=$(echo ${osd_journal} | sed 's/[^0-9]//g')\n  local jdev=$(echo ${osd_journal} | sed 's/[0-9]//g')\n  if [ -b \"${jdev}\" ]; then\n    sgdisk --new=${osd_journal_partition}:0:+${OSD_JOURNAL_SIZE}M \\\n      --change-name='${osd_journal_partition}:ceph journal' \\\n      --partition-guid=${osd_journal_partition}:${OSD_JOURNAL_UUID} \\\n      --typecode=${osd_journal_partition}:45b0969e-9b03-4f30-b4c6-b4b80ceff106 --mbrtogpt -- ${jdev}\n    OSD_JOURNAL=$(dev_part ${jdev} ${osd_journal_partition})\n    udev_settle\n  else\n    echo \"OSD Init: The backing device ${jdev} for ${OSD_JOURNAL} does not exist on this system.\"\n    exit 1\n  fi\n}\n\nfunction osd_journal_prepare {\n  if [ -n \"${OSD_JOURNAL}\" ]; then\n    if [ -b ${OSD_JOURNAL} ]; then\n      OSD_JOURNAL=$(readlink -f ${OSD_JOURNAL})\n      OSD_JOURNAL_PARTITION=$(echo ${OSD_JOURNAL} | sed 's/[^0-9]//g')\n      local jdev=$(echo ${OSD_JOURNAL} | sed 's/[0-9]//g')\n      if [ -z \"${OSD_JOURNAL_PARTITION}\" ]; then\n        OSD_JOURNAL=$(dev_part ${jdev} 
${OSD_JOURNAL_PARTITION})\n      else\n        OSD_JOURNAL=${OSD_JOURNAL}\n      fi\n    elif [ \"x${JOURNAL_TYPE}\" != \"xdirectory\" ]; then\n      # The block device exists but doesn't appear to be paritioned, we will proceed with parititioning the device.\n      OSD_JOURNAL=$(readlink -f ${OSD_JOURNAL})\n      until [ -b ${OSD_JOURNAL} ]; do\n        osd_journal_create ${OSD_JOURNAL}\n      done\n    fi\n    chown ceph. ${OSD_JOURNAL};\n  elif [ \"x${JOURNAL_TYPE}\" != \"xdirectory\" ]; then\n    echo \"No journal device specified. OSD and journal will share ${OSD_DEVICE}\"\n    echo \"For better performance on HDD, consider moving your journal to a separate device\"\n  fi\n  CLI_OPTS=\"${CLI_OPTS} --filestore\"\n}\n\nfunction osd_disk_prepare {\n\n  if [[ ${CEPH_LVM_PREPARE} -eq 1 ]] || [[ ${DISK_ZAPPED} -eq 1 ]]; then\n    udev_settle\n    RESULTING_VG=\"\"; RESULTING_LV=\"\";\n    create_vg_if_needed \"${OSD_DEVICE}\"\n    create_lv_if_needed \"${OSD_DEVICE}\" \"${RESULTING_VG}\" \"--yes -l 100%FREE\"\n\n    CLI_OPTS=\"${CLI_OPTS} --data ${RESULTING_LV}\"\n    CEPH_LVM_PREPARE=1\n    udev_settle\n  fi\n  if pvdisplay -ddd -v ${OSD_DEVICE} | awk '/VG Name/{print $3}' | grep \"ceph\"; then\n    echo \"OSD Init: Device is already set up. LVM prepare does not need to be called.\"\n    CEPH_LVM_PREPARE=0\n  fi\n\n  osd_journal_prepare\n  CLI_OPTS=\"${CLI_OPTS} --data ${OSD_DEVICE} --journal ${OSD_JOURNAL}\"\n  udev_settle\n\n  if [ ! -z \"$DEVICE_CLASS\" ]; then\n    CLI_OPTS=\"${CLI_OPTS} --crush-device-class ${DEVICE_CLASS}\"\n  fi\n\n  if [[ ${CEPH_LVM_PREPARE} -eq 1 ]]; then\n    echo \"OSD Init: Calling ceph-volume lvm-v prepare ${CLI_OPTS}\"\n    ceph-volume lvm -v prepare ${CLI_OPTS}\n    udev_settle\n  fi\n}\n\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/ceph-volume/_init-ceph-volume-helper-bluestore.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexport OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})\nexport OSD_BLUESTORE=1\nalias prep_device='locked prep_device'\n\nfunction check_block_device_for_zap {\n  local block_device=$1\n  local device_type=$2\n\n  if [[ ${block_device} ]]; then\n    local vg_name=$(get_vg_name_from_device ${block_device})\n    local lv_name=$(get_lv_name_from_device ${OSD_DEVICE} ${device_type})\n    local vg=$(vgs --noheadings -o vg_name -S \"vg_name=${vg_name}\" | tr -d '[:space:]')\n    if [[ \"${vg}\" ]]; then\n      local device_osd_id=$(get_osd_id_from_volume \"/dev/${vg_name}/${lv_name}\")\n      CEPH_LVM_PREPARE=1\n      if [[ -n \"${device_osd_id}\" ]] && [[ -n \"${OSD_ID}\" ]]; then\n        if [[ \"${device_osd_id}\" == \"${OSD_ID}\" ]]; then\n          echo \"OSD Init: OSD ID matches the OSD ID already on the data volume. LVM prepare does not need to be called.\"\n          CEPH_LVM_PREPARE=0\n        else\n          echo \"OSD Init: OSD ID does match the OSD ID on the data volume. 
Device needs to be zapped.\"\n          ZAP_DEVICE=1\n        fi\n      fi\n\n      # Check if this device (db or wal) has no associated data volume\n      local logical_volumes=\"$(lvs --noheadings -o lv_name ${vg} | xargs)\"\n      for volume in ${logical_volumes}; do\n        local data_volume=$(echo ${volume} | sed -E -e 's/-db-|-wal-/-lv-/g')\n        if [[ -z $(lvs --noheadings -o lv_name -S \"lv_name=${data_volume}\") ]]; then\n          # DB or WAL volume without a corresponding data volume, remove it\n          lvremove -y /dev/${vg}/${volume}\n          echo \"OSD Init: LV /dev/${vg}/${volume} was removed as it did not have a data volume.\"\n        fi\n      done\n    else\n      if [[ \"${vg_name}\" ]]; then\n        local logical_devices=$(get_dm_devices_from_osd_device \"${OSD_DEVICE}\")\n        local device_filter=$(echo \"${vg_name}\" | sed 's/-/--/g')\n        local logical_devices=$(echo \"${logical_devices}\" | grep \"${device_filter}\" | xargs)\n        if [[ \"$logical_devices\" ]]; then\n          echo \"OSD Init: No VG resources found with name ${vg_name}. Device needs to be zapped.\"\n          ZAP_DEVICE=1\n        fi\n      fi\n    fi\n  fi\n}\n\nfunction determine_what_needs_zapping {\n\n  local osd_fsid=$(get_cluster_fsid_from_device ${OSD_DEVICE})\n  local cluster_fsid=$(ceph-conf --lookup fsid)\n\n  # If the OSD FSID is defined within the device, check if we're already bootstrapped.\n  if [[ ! -z \"${osd_fsid}\" ]]; then\n    # Check if the OSD FSID is the same as the cluster FSID. If so, then we're\n    # already bootstrapped; otherwise, this is an old disk and needs to\n    # be zapped.\n    if [[ \"${osd_fsid}\" == \"${cluster_fsid}\" ]]; then\n      if [[ ! -z \"${OSD_ID}\" ]]; then\n        # Check to see what needs to be done to prepare the disk. 
If the OSD\n        # ID is in the Ceph OSD list, then LVM prepare does not need to be done.\n        if ceph --name client.bootstrap-osd --keyring $OSD_BOOTSTRAP_KEYRING osd ls |grep -w ${OSD_ID}; then\n          echo \"OSD Init: Running bluestore mode and ${OSD_DEVICE} already bootstrapped. LVM prepare does not need to be called.\"\n          CEPH_LVM_PREPARE=0\n        elif [[ ${OSD_FORCE_REPAIR} -eq 1 ]]; then\n          echo \"OSD initialized for this cluster, but OSD ID not found in the cluster, reinitializing\"\n          ZAP_DEVICE=1\n        else\n          echo \"OSD initialized for this cluster, but OSD ID not found in the cluster, repair manually\"\n        fi\n      fi\n    else\n      echo \"OSD Init: OSD FSID ${osd_fsid} initialized for a different cluster. It needs to be zapped.\"\n      ZAP_DEVICE=1\n    fi\n  elif [[ $(sgdisk --print ${OSD_DEVICE} | grep \"F800\") ]]; then\n    # Ceph-disk was used to initialize the disk, but this is not supported\n    echo \"ceph-disk was used to initialize the disk, but this is no longer supported\"\n    exit 1\n  fi\n\n  check_block_device_for_zap \"${BLOCK_DB}\" db\n  check_block_device_for_zap \"${BLOCK_WAL}\" wal\n\n  # Zapping extra partitions isn't done for bluestore\n  ZAP_EXTRA_PARTITIONS=0\n}\n\nfunction prep_device {\n  local block_device=$1\n  local block_device_size=$2\n  local device_type=$3\n  local vg_name lv_name vg device_osd_id logical_devices logical_volume\n  RESULTING_VG=\"\"; RESULTING_LV=\"\";\n\n  udev_settle\n  vg_name=$(get_vg_name_from_device ${block_device})\n  lv_name=$(get_lv_name_from_device ${OSD_DEVICE} ${device_type})\n  vg=$(vgs --noheadings -o vg_name -S \"vg_name=${vg_name}\" | tr -d '[:space:]')\n  if [[ -z \"${vg}\" ]]; then\n    create_vg_if_needed \"${block_device}\"\n    vg=${RESULTING_VG}\n  fi\n  udev_settle\n\n  create_lv_if_needed \"${block_device}\" \"${vg}\" \"--yes -L ${block_device_size}\" \"${lv_name}\"\n  if [[ \"${device_type}\" == \"db\" ]]; then\n    
BLOCK_DB=${RESULTING_LV}\n  elif [[ \"${device_type}\" == \"wal\" ]]; then\n    BLOCK_WAL=${RESULTING_LV}\n  fi\n  udev_settle\n}\n\nfunction osd_disk_prepare {\n\n  if [[ ${CEPH_LVM_PREPARE} -eq 1 ]] || [[ ${DISK_ZAPPED} -eq 1 ]]; then\n    udev_settle\n    RESULTING_VG=\"\"; RESULTING_LV=\"\";\n    create_vg_if_needed \"${OSD_DEVICE}\"\n    create_lv_if_needed \"${OSD_DEVICE}\" \"${RESULTING_VG}\" \"--yes -l 100%FREE\"\n\n    CLI_OPTS=\"${CLI_OPTS} --data ${RESULTING_LV}\"\n    CEPH_LVM_PREPARE=1\n    udev_settle\n  fi\n\n  if [[ ${BLOCK_DB} && ${BLOCK_WAL} ]]; then\n    prep_device \"${BLOCK_DB}\" \"${BLOCK_DB_SIZE}\" \"db\" \"${OSD_DEVICE}\"\n    prep_device \"${BLOCK_WAL}\" \"${BLOCK_WAL_SIZE}\" \"wal\" \"${OSD_DEVICE}\"\n  elif [[ -z ${BLOCK_DB} && ${BLOCK_WAL} ]]; then\n    prep_device \"${BLOCK_WAL}\" \"${BLOCK_WAL_SIZE}\" \"wal\" \"${OSD_DEVICE}\"\n  elif [[ ${BLOCK_DB} && -z ${BLOCK_WAL} ]]; then\n    prep_device \"${BLOCK_DB}\" \"${BLOCK_DB_SIZE}\" \"db\" \"${OSD_DEVICE}\"\n  fi\n\n  CLI_OPTS=\"${CLI_OPTS} --bluestore\"\n\n  if [ ! -z \"$BLOCK_DB\" ]; then\n    CLI_OPTS=\"${CLI_OPTS} --block.db ${BLOCK_DB}\"\n  fi\n\n  if [ ! -z \"$BLOCK_WAL\" ]; then\n    CLI_OPTS=\"${CLI_OPTS} --block.wal ${BLOCK_WAL}\"\n  fi\n\n  if [ ! -z \"$DEVICE_CLASS\" ]; then\n    CLI_OPTS=\"${CLI_OPTS} --crush-device-class ${DEVICE_CLASS}\"\n  fi\n\n  if [[ ${CEPH_LVM_PREPARE} -eq 1 ]]; then\n    echo \"OSD Init: Calling ceph-volume lvm-v prepare ${CLI_OPTS}\"\n    ceph-volume lvm -v prepare ${CLI_OPTS}\n    udev_settle\n  fi\n}\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/ceph-volume/_init-ceph-volume-helper-directory.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n# We do not want to zap journal disk. Tracking this option seperatly.\n: \"${JOURNAL_FORCE_ZAP:=0}\"\n\nexport OSD_DEVICE=\"/var/lib/ceph/osd\"\nexport OSD_JOURNAL=\"/var/lib/ceph/journal\"\n"
  },
  {
    "path": "ceph-osd/templates/bin/osd/ceph-volume/_init-with-ceph-volume.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n: \"${OSD_FORCE_REPAIR:=0}\"\n\nsource /tmp/osd-common-ceph-volume.sh\n\nsource /tmp/init-ceph-volume-helper-${STORAGE_TYPE}.sh\n\n\n# Set up aliases for functions that require disk synchronization\nalias rename_vg='locked rename_vg'\nalias rename_lvs='locked rename_lvs'\nalias update_lv_tags='locked update_lv_tags'\n\n# Renames a single VG if necessary\nfunction rename_vg {\n  local physical_disk=$1\n  local old_vg_name=$(pvdisplay -ddd -v ${physical_disk} | awk '/VG Name/{print $3}')\n  local vg_name=$(get_vg_name_from_device ${physical_disk})\n\n  if [[ \"${old_vg_name}\" ]] && [[ \"${vg_name}\" != \"${old_vg_name}\" ]]; then\n    vgrename ${old_vg_name} ${vg_name}\n    echo \"OSD Init: Renamed volume group ${old_vg_name} to ${vg_name}.\"\n  fi\n}\n\n# Renames all LVs associated with an OSD as necesasry\nfunction rename_lvs {\n  local data_disk=$1\n  local vg_name=$(pvdisplay -ddd -v ${data_disk} | awk '/VG Name/{print $3}')\n\n  if [[ \"${vg_name}\" ]]; then\n    # Rename the OSD volume if necessary\n    local old_lv_name=$(lvdisplay ${vg_name} | awk '/LV Name/{print $3}')\n    local lv_name=$(get_lv_name_from_device ${data_disk} lv)\n\n    if [[ \"${old_lv_name}\" ]] && [[ \"${lv_name}\" != \"${old_lv_name}\" ]]; then\n      lvrename ${vg_name} ${old_lv_name} ${lv_name}\n      echo \"OSD Init: Renamed logical volume ${old_lv_name} (from group ${vg_name}) to ${lv_name}.\"\n    
fi\n\n    # Rename the OSD's block.db volume if necessary, referenced by UUID\n    local lv_tag=$(get_lvm_tag_from_device ${data_disk} ceph.db_uuid)\n\n    if [[ \"${lv_tag}\" ]]; then\n      local lv_device=$(lvdisplay | grep -B4 \"${lv_tag}\" | awk '/LV Path/{print $3}')\n\n      if [[ \"${lv_device}\" ]]; then\n        local db_vg=$(echo ${lv_device} | awk -F \"/\" '{print $3}')\n        old_lv_name=$(echo ${lv_device} | awk -F \"/\" '{print $4}')\n        local db_name=$(get_lv_name_from_device ${data_disk} db)\n\n        if [[ \"${old_lv_name}\" ]] && [[ \"${db_name}\" != \"${old_lv_name}\" ]]; then\n          lvrename ${db_vg} ${old_lv_name} ${db_name}\n          echo \"OSD Init: Renamed DB logical volume ${old_lv_name} (from group ${db_vg}) to ${db_name}.\"\n        fi\n      fi\n    fi\n\n    # Rename the OSD's WAL volume if necessary, referenced by UUID\n    lv_tag=$(get_lvm_tag_from_device ${data_disk} ceph.wal_uuid)\n\n    if [[ \"${lv_tag}\" ]]; then\n      local lv_device=$(lvdisplay | grep -B4 \"${lv_tag}\" | awk '/LV Path/{print $3}')\n\n      if [[ \"${lv_device}\" ]]; then\n        local wal_vg=$(echo ${lv_device} | awk -F \"/\" '{print $3}')\n        old_lv_name=$(echo ${lv_device} | awk -F \"/\" '{print $4}')\n        local wal_name=$(get_lv_name_from_device ${data_disk} wal)\n\n        if [[ \"${old_lv_name}\" ]] && [[ \"${wal_name}\" != \"${old_lv_name}\" ]]; then\n          lvrename ${wal_vg} ${old_lv_name} ${wal_name}\n          echo \"OSD Init: Renamed WAL logical volume ${old_lv_name} (from group ${wal_vg}) to ${wal_name}.\"\n        fi\n      fi\n    fi\n  fi\n}\n\n# Fixes up the tags that reference block, db, and wal logical_volumes\n# NOTE: This updates tags based on current VG and LV names, so any necessary\n#       renaming should be completed prior to calling this\nfunction update_lv_tags {\n  local data_disk=$1\n  local pv_uuid=$(pvdisplay -ddd -v ${data_disk} | awk '/PV UUID/{print $3}')\n\n  if [[ \"${pv_uuid}\" ]]; then\n    local 
volumes=\"$(lvs --no-headings | grep -e \"${pv_uuid}\")\"\n    local block_device db_device wal_device vg_name\n    local old_block_device old_db_device old_wal_device\n\n    # Build OSD device paths from current VG and LV names\n    while read lv vg other_stuff; do\n      if [[ \"${lv}\" == \"$(get_lv_name_from_device ${data_disk} lv)\" ]]; then\n        block_device=\"/dev/${vg}/${lv}\"\n        old_block_device=$(get_lvm_tag_from_volume ${block_device} ceph.block_device)\n      fi\n      if [[ \"${lv}\" == \"$(get_lv_name_from_device ${data_disk} db)\" ]]; then\n        db_device=\"/dev/${vg}/${lv}\"\n        old_db_device=$(get_lvm_tag_from_volume ${block_device} ceph.db_device)\n      fi\n      if [[ \"${lv}\" == \"$(get_lv_name_from_device ${data_disk} wal)\" ]]; then\n        wal_device=\"/dev/${vg}/${lv}\"\n        old_wal_device=$(get_lvm_tag_from_volume ${block_device} ceph.wal_device)\n      fi\n    done <<< ${volumes}\n\n    # Set new tags on all of the volumes using paths built above\n    while read lv vg other_stuff; do\n      if [[ \"${block_device}\" ]]; then\n        if [[ \"${old_block_device}\" ]]; then\n          lvchange --deltag \"ceph.block_device=${old_block_device}\" /dev/${vg}/${lv}\n        fi\n        lvchange --addtag \"ceph.block_device=${block_device}\" /dev/${vg}/${lv}\n        echo \"OSD Init: Updated lv tags for data volume ${block_device}.\"\n      fi\n      if [[ \"${db_device}\" ]]; then\n        if [[ \"${old_db_device}\" ]]; then\n          lvchange --deltag \"ceph.db_device=${old_db_device}\" /dev/${vg}/${lv}\n        fi\n        lvchange --addtag \"ceph.db_device=${db_device}\" /dev/${vg}/${lv}\n        echo \"OSD Init: Updated lv tags for DB volume ${db_device}.\"\n      fi\n      if [[ \"${wal_device}\" ]]; then\n        if [[ \"${old_wal_device}\" ]]; then\n          lvchange --deltag \"ceph.wal_device=${old_wal_device}\" /dev/${vg}/${lv}\n        fi\n        lvchange --addtag \"ceph.wal_device=${wal_device}\" 
/dev/${vg}/${lv}\n        echo \"OSD Init: Updated lv tags for WAL volume ${wal_device}.\"\n      fi\n    done <<< ${volumes}\n  fi\n}\n\nfunction create_vg_if_needed {\n  local bl_device=$1\n  local vg_name=$(get_vg_name_from_device ${bl_device})\n  if [[ -z \"${vg_name}\" ]]; then\n    local random_uuid=$(uuidgen)\n    vgcreate ceph-vg-${random_uuid} ${bl_device}\n    vg_name=$(get_vg_name_from_device ${bl_device})\n    vgrename ceph-vg-${random_uuid} ${vg_name}\n    echo \"OSD Init: Created volume group ${vg_name} for device ${bl_device}.\"\n  fi\n  RESULTING_VG=${vg_name}\n}\n\nfunction create_lv_if_needed {\n  local bl_device=$1\n  local vg_name=$2\n  local options=$3\n  local lv_name=${4:-$(get_lv_name_from_device ${bl_device} lv)}\n\n  if [[ ! \"$(lvdisplay | awk '/LV Name/{print $3}' | grep ${lv_name})\" ]]; then\n    lvcreate ${options} -n ${lv_name} ${vg_name}\n    echo \"OSD Init: Created logical volume ${lv_name} in group ${vg_name} for device ${bl_device}.\"\n  fi\n  RESULTING_LV=${vg_name}/${lv_name}\n}\n\nfunction osd_disk_prechecks {\n  if [[ -z \"${OSD_DEVICE}\" ]]; then\n    echo \"ERROR- You must provide a device to build your OSD ie: /dev/sdb\"\n    exit 1\n  fi\n\n  if [[ ! -b \"${OSD_DEVICE}\" ]]; then\n    echo \"ERROR- The device pointed by OSD_DEVICE (${OSD_DEVICE}) doesn't exist !\"\n    exit 1\n  fi\n\n  if [ ! -e ${OSD_BOOTSTRAP_KEYRING} ]; then\n    echo \"ERROR- ${OSD_BOOTSTRAP_KEYRING} must exist. 
You can extract it from your current monitor by running 'ceph auth get client.bootstrap-osd -o ${OSD_BOOTSTRAP_KEYRING}'\"\n    exit 1\n  fi\n\n  timeout 10 ceph --name client.bootstrap-osd --keyring ${OSD_BOOTSTRAP_KEYRING} health || exit 1\n}\n\nfunction perform_zap {\n  if [[ ${ZAP_EXTRA_PARTITIONS} != \"\" ]]; then\n    # This used for filestore/blockstore only\n    echo \"OSD Init: Zapping extra partitions ${ZAP_EXTRA_PARTITIONS}\"\n    zap_extra_partitions \"${ZAP_EXTRA_PARTITIONS}\"\n  fi\n  echo \"OSD Init: Zapping device ${OSD_DEVICE}...\"\n  disk_zap ${OSD_DEVICE}\n  DISK_ZAPPED=1\n  udev_settle\n}\n\n\n#######################################################################\n# Main program\n#######################################################################\n\nif [[ \"${STORAGE_TYPE}\" != \"directory\" ]]; then\n\n  # Check to make sure we have what we need to continue\n  osd_disk_prechecks\n\n  # Settle LVM changes before inspecting volumes\n  udev_settle\n\n  # Rename VGs first\n  if [[ \"${OSD_DEVICE}\" ]]; then\n    OSD_DEVICE=$(readlink -f ${OSD_DEVICE})\n    rename_vg ${OSD_DEVICE}\n  fi\n\n  # Rename block DB device VG next\n  if [[ \"${BLOCK_DB}\" ]]; then\n    BLOCK_DB=$(readlink -f ${BLOCK_DB})\n    rename_vg ${BLOCK_DB}\n  fi\n\n  # Rename block WAL device VG next\n  if [[ \"${BLOCK_WAL}\" ]]; then\n    BLOCK_WAL=$(readlink -f ${BLOCK_WAL})\n    rename_vg ${BLOCK_WAL}\n  fi\n\n  # Rename LVs after VGs are correct\n  rename_lvs ${OSD_DEVICE}\n\n  # Update tags (all VG and LV names should be correct before calling this)\n  update_lv_tags ${OSD_DEVICE}\n\n  # Settle LVM changes again after any changes have been made\n  udev_settle\n\n  # Initialize some important global variables\n  CEPH_LVM_PREPARE=1\n  OSD_ID=$(get_osd_id_from_device ${OSD_DEVICE})\n  DISK_ZAPPED=0\n  ZAP_DEVICE=0\n  ZAP_EXTRA_PARTITIONS=\"\"\n\n  # The disk may need to be zapped or some LVs may need to be deleted before\n  # moving on with the disk preparation.\n  
determine_what_needs_zapping\n\n  if [[ ${ZAP_DEVICE} -eq 1 ]]; then\n    perform_zap\n  fi\n\n  # Prepare the disk for use\n  osd_disk_prepare\n\n  # Clean up resources held by the common script\n  common_cleanup\nfi\n"
  },
  {
    "path": "ceph-osd/templates/bin/utils/_checkDNS.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n: \"${CEPH_CONF:=\"/etc/ceph/${CLUSTER}.conf\"}\"\nENDPOINT=\"{$1}\"\n\nfunction check_mon_dns () {\n  GREP_CMD=$(grep -rl 'ceph-mon' ${CEPH_CONF})\n\n  if [[ \"${ENDPOINT}\" == \"{up}\" ]]; then\n    echo \"If DNS is working, we are good here\"\n  elif [[ \"${ENDPOINT}\" != \"\" ]]; then\n    if [[ ${GREP_CMD} != \"\" ]]; then\n      # No DNS, write CEPH MONs IPs into ${CEPH_CONF}\n      sh -c -e \"cat ${CEPH_CONF}.template | sed 's/mon_host.*/mon_host = ${ENDPOINT}/g' | tee ${CEPH_CONF}\" > /dev/null 2>&1\n    else\n      echo \"endpoints are already cached in ${CEPH_CONF}\"\n      exit\n    fi\n  fi\n}\n\ncheck_mon_dns\n\nexit\n"
  },
  {
    "path": "ceph-osd/templates/bin/utils/_defragOSDs.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nsource /tmp/utils-resolveLocations.sh\n\nif [ \"x${STORAGE_TYPE%-*}\" == \"xblock\" ]; then\n  OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})\n  ODEV=$(echo ${OSD_DEVICE} | sed 's/[0-9]//g' | cut -f 3 -d '/')\n  OSD_PATH=$(cat /proc/mounts | awk '/ceph-/{print $2}')\n  OSD_STORE=$(cat ${OSD_PATH}/type)\n  DATA_PART=$(cat /proc/mounts | awk '/ceph-/{print $1}')\n\n  ODEV_ROTATIONAL=$(cat /sys/block/${ODEV}/queue/rotational)\n  ODEV_SCHEDULER=$(cat /sys/block/${ODEV}/queue/scheduler | tr -d '[]')\n\n  # NOTE(supamatt): TODO implement bluestore defrag options once it's available upstream\n  if [ \"${ODEV_ROTATIONAL}\" -eq \"1\" ] && [ \"x${OSD_STORE}\" == \"xfilestore\" ]; then\n    # NOTE(supamatt): Switch to CFQ in order to not block I/O\n    echo \"cfq\" | tee /sys/block/${ODEV}/queue/scheduler || true\n    ionice -c 3 xfs_fsr \"${OSD_DEVICE}\" 2>/dev/null\n    # NOTE(supamatt): Switch back to previous IO scheduler\n    echo ${ODEV_SCHEDULER} | tee /sys/block/${ODEV}/queue/scheduler || true\n  fi\nfi\n\nexit 0\n"
  },
  {
    "path": "ceph-osd/templates/bin/utils/_resolveLocations.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nif [[ \"${STORAGE_LOCATION}\" ]]; then\n  STORAGE_LOCATION=$(ls ${STORAGE_LOCATION})\n  if [[ `echo \"${STORAGE_LOCATION}\" | wc -w` -ge 2 ]]; then\n    echo \"ERROR- Multiple locations found: ${STORAGE_LOCATION}\"\n    exit 1\n  fi\nfi\n\nif [[ \"${BLOCK_DB}\" ]]; then\n  BLOCK_DB=$(ls ${BLOCK_DB})\n  if [[ `echo \"${BLOCK_DB}\" | wc -w` -ge 2 ]]; then\n    echo \"ERROR- Multiple locations found: ${BLOCK_DB}\"\n    exit 1\n  fi\nfi\n\nif [[ \"${BLOCK_WAL}\" ]]; then\n  BLOCK_WAL=$(ls ${BLOCK_WAL})\n  if [[ `echo \"${BLOCK_WAL}\" | wc -w` -ge 2 ]]; then\n    echo \"ERROR- Multiple locations found: ${BLOCK_WAL}\"\n    exit 1\n  fi\nfi\n"
  },
  {
    "path": "ceph-osd/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  post-apply.sh: |\n{{ tuple \"bin/_post-apply.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  osd-start.sh: |\n{{ tuple \"bin/osd/_start.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  log-tail.sh: |\n{{ tuple \"bin/osd/_log-tail.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  osd-directory-ceph-volume.sh: |\n{{ tuple \"bin/osd/_directory.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  osd-block-ceph-volume.sh: |\n{{ tuple \"bin/osd/ceph-volume/_block.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  osd-bluestore-ceph-volume.sh: |\n{{ tuple \"bin/osd/ceph-volume/_bluestore.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  osd-init-ceph-volume-helper-bluestore.sh: |\n{{ tuple \"bin/osd/ceph-volume/_init-ceph-volume-helper-bluestore.sh.tpl\" . 
| include  \"helm-toolkit.utils.template\" | indent 4 }}\n  osd-init-ceph-volume-helper-directory.sh: |\n{{ tuple \"bin/osd/ceph-volume/_init-ceph-volume-helper-directory.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  osd-init-ceph-volume-helper-block-logical.sh: |\n{{ tuple \"bin/osd/ceph-volume/_init-ceph-volume-helper-block-logical.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  osd-init-ceph-volume.sh: |\n{{ tuple \"bin/osd/ceph-volume/_init-with-ceph-volume.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  osd-common-ceph-volume.sh: |\n{{ tuple \"bin/osd/ceph-volume/_common.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  osd-config.sh: |\n{{ tuple \"bin/osd/_config.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  osd-init.sh: |\n{{ tuple \"bin/osd/_init.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  osd-check.sh: |\n{{ tuple \"bin/osd/_check.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  osd-stop.sh: |\n{{ tuple \"bin/osd/_stop.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  log-runner-stop.sh: |\n{{ tuple \"bin/osd/_log-runner-stop.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  init-dirs.sh: |\n{{ tuple \"bin/_init-dirs.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  helm-tests.sh: |\n{{ tuple \"bin/_helm-tests.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  utils-checkDNS.sh: |\n{{ tuple \"bin/utils/_checkDNS.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  utils-defragOSDs.sh: |\n{{ tuple \"bin/utils/_defragOSDs.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  utils-resolveLocations.sh: |\n{{ tuple \"bin/utils/_resolveLocations.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "ceph-osd/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ceph.osd.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if empty .Values.conf.ceph.global.mon_host -}}\n{{- $monHost := tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n{{- $_ := $monHost | set .Values.conf.ceph.global \"mon_host\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceph.global.fsid -}}\n{{- $_ := uuidv4 | set .Values.conf.ceph.global \"fsid\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceph.osd.cluster_network -}}\n{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd \"cluster_network\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceph.osd.public_network -}}\n{{- $_ := .Values.network.public | set .Values.conf.ceph.osd \"public_network\" -}}\n{{- end -}}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ $configMapName }}\ndata:\n  ceph.conf: |\n{{ include \"helm-toolkit.utils.to_ini\" .Values.conf.ceph | indent 4 }}\n  storage.json: |\n{{ toPrettyJson .Values.conf.storage | indent 4 }}\n{{- end }}\n{{- end }}\n{{- if .Values.manifests.configmap_etc }}\n{{- list (printf \"%s-%s\" .Release.Name \"etc\") . | include \"ceph.osd.configmap.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-osd/templates/daemonset-osd.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"osdLivenessProbeTemplate\" -}}\nexec:\n  command:\n    - /tmp/osd-check.sh\n{{- end -}}\n\n{{- define \"osdReadinessProbeTemplate\" -}}\nexec:\n  command:\n    - /tmp/osd-check.sh\n{{- end -}}\n\n{{- if .Values.manifests.daemonset_osd }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := (printf \"%s\" .Release.Name) }}\n{{ tuple . \"osd\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes\n    verbs:\n      - get\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n{{- end }}\n\n{{- define \"ceph.osd.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 
3 }}\n{{- with $envAll }}\n---\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n  name: ceph-osd\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ceph\" \"osd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ceph\" \"osd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"osd\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"osd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-osd-default\" \"containerNames\" (list \"ceph-osd-default\" \"log-runner\" \"ceph-init-dirs\" \"ceph-log-ownership\" \"osd-init\" \"init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"osd\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.osd.node_selector_key }}: {{ .Values.labels.osd.node_selector_value }}\n      hostNetwork: true\n      hostPID: true\n      hostIPC: true\n      dnsPolicy: {{ .Values.pod.dns_policy }}\n      initContainers:\n{{ tuple $envAll \"osd\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: ceph-init-dirs\n{{ tuple $envAll \"ceph_osd\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"osd\" \"container\" \"ceph_init_dirs\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/init-dirs.sh\n          env:\n          # NOTE(portdirect): These environment variables will be populated\n          # dynamicly at the point of render.\n          # - name: JOURNAL_LOCATION\n          #   value: /var/lib/openstack-helm/ceph/osd/journal-one\n          # - name: STORAGE_LOCATION\n          #   value: /var/lib/openstack-helm/ceph/osd/data-one\n          # - name: JOURNAL_TYPE\n          #   value: directory\n          # - name: STORAGE_TYPE\n          #   value: directory\n            - name: CLUSTER\n              value: \"ceph\"\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MON_PORT\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: MON_PORT_V2\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-osd-bin\n              mountPath: /tmp/init-dirs.sh\n              subPath: init-dirs.sh\n              readOnly: true\n            - name: ceph-osd-etc\n              mountPath: /etc/ceph/storage.json\n              subPath: storage.json\n              readOnly: true\n            - name: pod-var-lib-ceph\n              mountPath: /var/lib/ceph\n              readOnly: false\n            - name: pod-var-lib-ceph-crash\n              mountPath: /var/lib/ceph/crash\n              readOnly: false\n            - name: pod-var-lib-ceph-tmp\n              mountPath: 
/var/lib/ceph/tmp\n              readOnly: false\n            - name: pod-var-crash\n              mountPath: /var/crash\n              mountPropagation: HostToContainer\n              readOnly: false\n        - name: ceph-log-ownership\n{{ tuple $envAll \"ceph_osd\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"osd\" \"container\" \"ceph_log_ownership\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n          # NOTE(portdirect): These environment variables will be populated\n          # dynamicly at the point of render and added to all containers in the\n          # pod\n          # - name: JOURNAL_LOCATION\n          #   value: /var/lib/openstack-helm/ceph/osd/journal-one\n          # - name: STORAGE_LOCATION\n          #   value: /var/lib/openstack-helm/ceph/osd/data-one\n          # - name: JOURNAL_TYPE\n          #   value: directory\n          # - name: STORAGE_TYPE\n          #   value: directory\n            - name: CLUSTER\n              value: \"ceph\"\n            - name: CEPH_GET_ADMIN_KEY\n              value: \"1\"\n          command:\n            - chown\n            - -R\n            - ceph:root\n            - /var/log/ceph\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: pod-var-log\n              mountPath: /var/log/ceph\n              readOnly: false\n        - name: osd-init\n{{ tuple $envAll \"ceph_osd\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.osd | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"osd\" \"container\" \"osd_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n 
         env:\n          # NOTE(portdirect): These environment variables will be populated\n          # dynamicly at the point of render and added to all containers in the\n          # pod\n          # - name: JOURNAL_LOCATION\n          #   value: /var/lib/openstack-helm/ceph/osd/journal-one\n          # - name: STORAGE_LOCATION\n          #   value: /var/lib/openstack-helm/ceph/osd/data-one\n          # - name: JOURNAL_TYPE\n          #   value: directory\n          # - name: STORAGE_TYPE\n          #   value: directory\n            - name: CLUSTER\n              value: \"ceph\"\n            - name: DEPLOY_TOOL\n              value: {{ .Values.deploy.tool }}\n            - name: OSD_FORCE_REPAIR\n              value: {{ .Values.deploy.osd_force_repair | quote }}\n            - name: CEPH_GET_ADMIN_KEY\n              value: \"1\"\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MON_PORT\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: MON_PORT_V2\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n          command:\n            - /tmp/osd-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-osd-bin\n              mountPath: /tmp/osd-init.sh\n              subPath: osd-init.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/osd-config.sh\n              subPath: osd-config.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: 
/tmp/init-ceph-volume-helper-bluestore.sh\n              subPath: osd-init-ceph-volume-helper-bluestore.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/init-ceph-volume-helper-directory.sh\n              subPath: osd-init-ceph-volume-helper-directory.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/init-ceph-volume-helper-block-logical.sh\n              subPath: osd-init-ceph-volume-helper-block-logical.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/init-ceph-volume.sh\n              subPath: osd-init-ceph-volume.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/osd-common-ceph-volume.sh\n              subPath: osd-common-ceph-volume.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/utils-resolveLocations.sh\n              subPath: utils-resolveLocations.sh\n              readOnly: true\n            - name: ceph-osd-etc\n              mountPath: /etc/ceph/ceph.conf.template\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-osd-etc\n              mountPath: /etc/ceph/storage.json\n              subPath: storage.json\n              readOnly: true\n            - name: ceph-bootstrap-osd-keyring\n              mountPath: /var/lib/ceph/bootstrap-osd/ceph.keyring\n              subPath: ceph.keyring\n              readOnly: false\n            - name: devices\n              mountPath: /dev\n              readOnly: false\n            - name: pod-var-lib-ceph\n              mountPath: /var/lib/ceph\n              readOnly: false\n            - name: pod-var-lib-ceph-crash\n              mountPath: /var/lib/ceph/crash\n              readOnly: false\n            - name: pod-var-lib-ceph-tmp\n              mountPath: /var/lib/ceph/tmp\n              readOnly: false\n            - name: run-lvm\n  
            mountPath: /run/lvm\n              readOnly: false\n            - name: run-udev\n              mountPath: /run/udev\n              readOnly: false\n            - name: pod-etc-lvm\n              mountPath: /etc/lvm\n              readOnly: false\n            - name: data\n              mountPath: /var/lib/ceph/osd\n              readOnly: false\n            - name: journal\n              mountPath: /var/lib/ceph/journal\n              readOnly: false\n            - name: pod-var-log\n              mountPath: /var/log/ceph\n              readOnly: false\n            - name: pod-var-crash\n              mountPath: /var/crash\n              mountPropagation: HostToContainer\n              readOnly: false\n      containers:\n        - name: log-runner\n{{ tuple $envAll \"ceph_osd\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"osd\" \"container\" \"log_runner\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: DAEMON_NAME\n              value: \"ceph-osd\"\n            - name: TRUNCATE_SIZE\n              value: {{ .Values.logging.truncate.size | quote }}\n            - name: TRUNCATE_PERIOD\n              value: {{ .Values.logging.truncate.period | quote }}\n            - name: WAIT_FOR_OSD_ID_TIMEOUT\n              value: {{ .Values.logging.osd_id.timeout | quote }}\n          command:\n            - /tmp/log-tail.sh\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/log-runner-stop.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ceph-osd-bin\n              mountPath: /tmp/log-tail.sh\n              subPath: log-tail.sh\n              readOnly: true\n            - name: pod-var-log\n              mountPath: /var/log/ceph\n              readOnly: false\n            - name: ceph-osd-bin\n              
mountPath: /tmp/log-runner-stop.sh\n              subPath: log-runner-stop.sh\n              readOnly: true\n        - name: ceph-osd-default\n{{ tuple $envAll \"ceph_osd\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.osd | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"osd\" \"container\" \"osd_pod\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n          # NOTE(portdirect): These environment variables will be populated\n          # dynamicly at the point of render.\n          # - name: JOURNAL_LOCATION\n          #   value: /var/lib/openstack-helm/ceph/osd/journal-one\n          # - name: STORAGE_LOCATION\n          #   value: /var/lib/openstack-helm/ceph/osd/data-one\n          # - name: JOURNAL_TYPE\n          #   value: directory\n          # - name: STORAGE_TYPE\n          #   value: directory\n            - name: CLUSTER\n              value: \"ceph\"\n            - name: DEPLOY_TOOL\n              value: {{ .Values.deploy.tool }}\n            - name: CEPH_GET_ADMIN_KEY\n              value: \"1\"\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MON_PORT\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: MON_PORT_V2\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n          command:\n            - /tmp/osd-start.sh\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/osd-stop.sh\n{{ dict \"envAll\" . 
\"component\" \"ceph-osd\" \"container\" \"ceph-osd\" \"type\" \"liveness\" \"probeTemplate\" (include \"osdLivenessProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"ceph-osd\" \"container\" \"ceph-osd\" \"type\" \"readiness\" \"probeTemplate\" (include \"osdReadinessProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: pod-forego\n              mountPath: /etc/forego\n            - name: ceph-osd-bin\n              mountPath: /tmp/osd-start.sh\n              subPath: osd-start.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/osd-directory-ceph-volume.sh\n              subPath: osd-directory-ceph-volume.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/osd-block-ceph-volume.sh\n              subPath: osd-block-ceph-volume.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/osd-bluestore-ceph-volume.sh\n              subPath: osd-bluestore-ceph-volume.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/osd-check.sh\n              subPath: osd-check.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/osd-stop.sh\n              subPath: osd-stop.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/utils-checkDNS.sh\n              subPath: utils-checkDNS.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/osd-common-ceph-volume.sh\n              subPath: 
osd-common-ceph-volume.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/utils-resolveLocations.sh\n              subPath: utils-resolveLocations.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/utils-defragOSDs.sh\n              subPath: utils-defragOSDs.sh\n              readOnly: true\n            - name: ceph-osd-etc\n              mountPath: /etc/ceph/storage.json\n              subPath: storage.json\n              readOnly: true\n            - name: ceph-osd-etc\n              mountPath: /etc/ceph/ceph.conf.template\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-bootstrap-osd-keyring\n              mountPath: /var/lib/ceph/bootstrap-osd/ceph.keyring\n              subPath: ceph.keyring\n              readOnly: false\n            - name: devices\n              mountPath: /dev\n              readOnly: false\n            - name: pod-var-lib-ceph\n              mountPath: /var/lib/ceph\n              readOnly: false\n            - name: pod-var-lib-ceph-crash\n              mountPath: /var/lib/ceph/crash\n              readOnly: false\n            - name: pod-var-lib-ceph-tmp\n              mountPath: /var/lib/ceph/tmp\n              readOnly: false\n            - name: run-lvm\n              mountPath: /run/lvm\n              readOnly: false\n            - name: run-udev\n              mountPath: /run/udev\n              readOnly: false\n            - name: pod-etc-lvm\n              mountPath: /etc/lvm\n              readOnly: false\n            - name: data\n              mountPath: /var/lib/ceph/osd\n              readOnly: false\n            - name: journal\n              mountPath: /var/lib/ceph/journal\n              readOnly: false\n            - name: pod-var-log\n              mountPath: /var/log/ceph\n              readOnly: false\n            - name: pod-var-crash\n              mountPath: /var/crash\n             
 mountPropagation: HostToContainer\n              readOnly: false\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-run\n          emptyDir:\n            medium: \"Memory\"\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: pod-forego\n          emptyDir: {}\n        - name: devices\n          hostPath:\n            path: /dev\n        - name: run-lvm\n          hostPath:\n            path: /run/lvm\n        - name: run-udev\n          hostPath:\n            path: /run/udev\n        - name: pod-etc-lvm\n          emptyDir: {}\n        - name: pod-var-lib-ceph\n          emptyDir: {}\n        - name: pod-var-lib-ceph-crash\n          hostPath:\n            path: /var/lib/openstack-helm/ceph/crash\n            type: DirectoryOrCreate\n        - name: pod-var-lib-ceph-tmp\n          hostPath:\n            path: /var/lib/openstack-helm/ceph/var-tmp\n            type: DirectoryOrCreate\n        - name: pod-var-crash\n          hostPath:\n            path: /var/crash\n            type: DirectoryOrCreate\n        - name: pod-var-log\n          emptyDir: {}\n        - name: ceph-osd-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\n            defaultMode: 0555\n        - name: ceph-osd-etc\n          configMap:\n            name: {{ $configMapName }}\n            defaultMode: 0444\n        - name: ceph-bootstrap-osd-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.osd }}\n      # NOTE(portdirect): If directory mounts are to be used for OSD's\n      # they will automaticly be inserted here, with the format:\n      # - name: data\n      #   hostPath:\n      #     path: /var/lib/foo\n      # - name: journal\n      #   hostPath:\n      #     path: /var/lib/bar\n\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_osd }}\n{{- $daemonset := .Values.daemonset.prefix_name }}\n{{- $configMapName := (printf \"%s-%s\" .Release.Name 
\"etc\") }}\n{{- $serviceAccountName := (printf \"%s\" .Release.Name) }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"ceph.osd.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"ceph.osd.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"ceph.utils.osd_daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-osd/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "ceph-osd/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ceph-osd-bootstrap\" }}\n{{ tuple $envAll \"bootstrap\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: ceph-osd-bootstrap\n  labels:\n{{ tuple $envAll \"ceph\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"bootstrap\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: ceph-osd-bootstrap\n{{ tuple $envAll \"ceph_bootstrap\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll 
$envAll.Values.pod.resources.jobs.bootstrap | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" \"container\" \"ceph_osd_bootstrap\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/bootstrap.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-osd-bin\n              mountPath: /tmp/bootstrap.sh\n              subPath: bootstrap.sh\n              readOnly: true\n            - name: ceph-osd-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-osd-admin-keyring\n              mountPath: /etc/ceph/ceph.client.admin.keyring\n              subPath: ceph.client.admin.keyring\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-osd-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\n            defaultMode: 0555\n        - name: ceph-osd-etc\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"etc\" | quote }}\n            defaultMode: 0444\n        - name: ceph-osd-admin-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin }}\n{{- end }}\n"
  },
  {
    "path": "ceph-osd/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"ceph-osd\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-osd/templates/job-post-apply.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if eq .Values.pod.lifecycle.upgrades.daemonsets.pod_replacement_strategy \"OnDelete\" }}\n{{- if and .Values.manifests.job_post_apply }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"post-apply\" }}\n{{ tuple $envAll \"post-apply\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - ''\n    resources:\n      - pods\n      - events\n      - jobs\n      - pods/exec\n    verbs:\n      - create\n      - get\n      - delete\n      - list\n  - apiGroups:\n      - 'apps'\n    resources:\n      - daemonsets\n    verbs:\n      - get\n      - list\n  - apiGroups:\n      - 'batch'\n    resources:\n      - jobs\n    verbs:\n      - get\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ $serviceAccountName }}\n  labels:\n{{ tuple $envAll \"ceph-upgrade\" \"post-apply\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | 
indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph-upgrade\" \"post-apply\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-osd-post-apply\" \"containerNames\" (list \"ceph-osd-post-apply\" \"init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"post_apply\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"post-apply\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: ceph-osd-post-apply\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"post_apply\" \"container\" \"ceph_osd_post_apply\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: CLUSTER\n              value: \"ceph\"\n            - name: CEPH_NAMESPACE\n              value: {{ .Release.Namespace }}\n            - name: RELEASE_GROUP_NAME\n              value: {{ .Release.Name }}\n            - name: REQUIRED_PERCENT_OF_OSDS\n              value: {{ .Values.conf.ceph.target.required_percent_of_osds | ceil | quote }}\n            - 
name: DISRUPTIVE_OSD_RESTART\n              value: {{ .Values.conf.storage.disruptive_osd_restart | quote }}\n            - name: UNCONDITIONAL_OSD_RESTART\n              value: {{ .Values.conf.storage.unconditional_osd_restart | quote }}\n          command:\n            - /tmp/post-apply.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-osd-bin\n              mountPath: /tmp/post-apply.sh\n              subPath: post-apply.sh\n              readOnly: true\n            - name: ceph-osd-bin\n              mountPath: /tmp/wait-for-pods.sh\n              subPath: wait-for-pods.sh\n              readOnly: true\n            - name: ceph-osd-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-osd-admin-keyring\n              mountPath: /etc/ceph/ceph.client.admin.keyring\n              subPath: ceph.client.admin.keyring\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-osd-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\n            defaultMode: 0555\n        - name: ceph-osd-etc\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"etc\" | quote }}\n            defaultMode: 0444\n        - name: ceph-osd-admin-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ceph-osd/templates/pod-helm-tests.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.helm_tests }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := printf \"%s-%s\" $envAll.Release.Name \"test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ $serviceAccountName }}\n  labels:\n{{ tuple $envAll \"ceph-osd\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-osd-test\" \"containerNames\" (list \"init\" \"ceph-cluster-helm-test\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n{{ dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  restartPolicy: Never\n  serviceAccountName: {{ $serviceAccountName }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  initContainers:\n{{ tuple $envAll \"tests\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: ceph-cluster-helm-test\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include 
\"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"ceph_cluster_helm_test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      env:\n        - name: CLUSTER\n          value: \"ceph\"\n        - name: CEPH_DEPLOYMENT_NAMESPACE\n          value: {{ .Release.Namespace }}\n        - name: REQUIRED_PERCENT_OF_OSDS\n          value: {{ .Values.conf.ceph.target.required_percent_of_osds | ceil | quote }}\n      command:\n        - /tmp/helm-tests.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: pod-etc-ceph\n          mountPath: /etc/ceph\n        - name: ceph-osd-bin\n          mountPath: /tmp/helm-tests.sh\n          subPath: helm-tests.sh\n          readOnly: true\n        - name: ceph-client-admin-keyring\n          mountPath: /etc/ceph/ceph.client.admin.keyring\n          subPath: ceph.client.admin.keyring\n          readOnly: true\n        - name: ceph-osd-etc\n          mountPath: /etc/ceph/ceph.conf\n          subPath: ceph.conf\n          readOnly: true\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: pod-etc-ceph\n      emptyDir: {}\n    - name: ceph-osd-bin\n      configMap:\n        name: {{ printf \"%s-%s\" $envAll.Release.Name \"bin\" | quote }}\n        defaultMode: 0555\n    - name: ceph-client-admin-keyring\n      secret:\n        secretName: {{ .Values.secrets.keyrings.admin }}\n    - name: ceph-osd-etc\n      configMap:\n        name: {{ printf \"%s-%s\" $envAll.Release.Name \"etc\" | quote }}\n        defaultMode: 0444\n{{- end }}\n"
  },
  {
    "path": "ceph-osd/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "ceph-osd/templates/utils/_osd_daemonset_overrides.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ceph.utils.match_exprs_hash\" }}\n  {{- $match_exprs := index . 0 }}\n  {{- $context := index . 1 }}\n  {{- $_ := set $context.Values \"__match_exprs_hash_content\" \"\" }}\n  {{- range $match_expr := $match_exprs }}\n    {{- $_ := set $context.Values \"__match_exprs_hash_content\" (print $context.Values.__match_exprs_hash_content $match_expr.key $match_expr.operator ($match_expr.values | quote)) }}\n  {{- end }}\n  {{- $context.Values.__match_exprs_hash_content | sha256sum | trunc 8 }}\n  {{- $_ := unset $context.Values \"__match_exprs_hash_content\" }}\n{{- end }}\n\n{{- define \"ceph.utils.osd_daemonset_overrides\" }}\n  {{- $daemonset := index . 0 }}\n  {{- $daemonset_yaml := index . 1 }}\n  {{- $configmap_include := index . 2 }}\n  {{- $configmap_name := index . 3 }}\n  {{- $context := index . 4 }}\n  {{- $_ := unset $context \".Files\" }}\n  {{- $_ := set $context.Values \"__daemonset_yaml\" $daemonset_yaml }}\n  {{- $daemonset_root_name := printf \"ceph_%s\" $daemonset }}\n  {{- $_ := set $context.Values \"__daemonset_list\" list }}\n  {{- $_ := set $context.Values \"__default\" dict }}\n  {{- if hasKey $context.Values.conf \"overrides\" }}\n    {{- range $key, $val := $context.Values.conf.overrides }}\n\n      {{- if eq $key $daemonset_root_name }}\n        {{- range $type, $type_data := . }}\n\n          {{- if eq $type \"hosts\" }}\n            {{- range $host_data := . 
}}\n              {{/* dictionary that will contain all info needed to generate this\n              iteration of the daemonset */}}\n              {{- $current_dict := dict }}\n\n              {{/* set daemonset name */}}\n              {{- $_ := set $current_dict \"name\" $host_data.name }}\n\n              {{/* apply overrides */}}\n              {{- $override_conf_copy := $host_data.conf }}\n              {{/* Deep copy to prevent https://storyboard.openstack.org/#!/story/2005936 */}}\n              {{- $root_conf_copy := omit ($context.Values.conf | toYaml | fromYaml) \"overrides\" }}\n              {{- $merged_dict := mergeOverwrite $root_conf_copy $override_conf_copy }}\n              {{- $root_conf_copy2 := dict \"conf\" $merged_dict }}\n              {{- $context_values := omit (omit ($context.Values | toYaml | fromYaml) \"conf\") \"__daemonset_list\" }}\n              {{- $root_conf_copy3 := mergeOverwrite $context_values $root_conf_copy2 }}\n              {{- $root_conf_copy4 := dict \"Values\" $root_conf_copy3 }}\n              {{- $_ := set $current_dict \"nodeData\" $root_conf_copy4 }}\n\n              {{/* Schedule to this host explicitly. 
*/}}\n              {{- $nodeSelector_dict := dict }}\n\n              {{- $_ := set $nodeSelector_dict \"key\" \"kubernetes.io/hostname\" }}\n              {{- $_ := set $nodeSelector_dict \"operator\" \"In\" }}\n\n              {{- $values_list := list $host_data.name }}\n              {{- $_ := set $nodeSelector_dict \"values\" $values_list }}\n\n              {{- $list_aggregate := list $nodeSelector_dict }}\n              {{- $_ := set $current_dict \"matchExpressions\" $list_aggregate }}\n\n              {{/* store completed daemonset entry/info into global list */}}\n              {{- $list_aggregate := append $context.Values.__daemonset_list $current_dict }}\n              {{- $_ := set $context.Values \"__daemonset_list\" $list_aggregate }}\n\n            {{- end }}\n          {{- end }}\n\n          {{- if eq $type \"labels\" }}\n            {{- $_ := set $context.Values \"__label_list\" . }}\n            {{- range $label_data := . }}\n              {{/* dictionary that will contain all info needed to generate this\n              iteration of the daemonset. 
*/}}\n              {{- $_ := set $context.Values \"__current_label\" dict }}\n\n              {{/* set daemonset name */}}\n              {{- $_ := set $context.Values.__current_label \"name\" $label_data.label.key }}\n\n              {{/* apply overrides */}}\n              {{- $override_conf_copy := $label_data.conf }}\n              {{/* Deep copy to prevent https://storyboard.openstack.org/#!/story/2005936 */}}\n              {{- $root_conf_copy := omit ($context.Values.conf | toYaml | fromYaml) \"overrides\" }}\n              {{- $merged_dict := mergeOverwrite $root_conf_copy $override_conf_copy }}\n              {{- $root_conf_copy2 := dict \"conf\" $merged_dict }}\n              {{- $context_values := omit (omit ($context.Values | toYaml | fromYaml) \"conf\") \"__daemonset_list\" }}\n              {{- $root_conf_copy3 := mergeOverwrite $context_values $root_conf_copy2 }}\n              {{- $root_conf_copy4 := dict \"Values\" $root_conf_copy3 }}\n              {{- $_ := set $context.Values.__current_label \"nodeData\" $root_conf_copy4 }}\n\n              {{/* Schedule to the provided label value(s) */}}\n              {{- $label_dict := omit $label_data.label \"NULL\" }}\n              {{- $_ := set $label_dict \"operator\" \"In\" }}\n              {{- $list_aggregate := list $label_dict }}\n              {{- $_ := set $context.Values.__current_label \"matchExpressions\" $list_aggregate }}\n\n              {{/* Do not schedule to other specified labels, with higher\n              precedence as the list position increases. Last defined label\n              is highest priority. 
*/}}\n              {{- $other_labels := without $context.Values.__label_list $label_data }}\n              {{- range $label_data2 := $other_labels }}\n                {{- $label_dict := omit $label_data2.label \"NULL\" }}\n\n                {{- $_ := set $label_dict \"operator\" \"NotIn\" }}\n\n                {{- $list_aggregate := append $context.Values.__current_label.matchExpressions $label_dict }}\n                {{- $_ := set $context.Values.__current_label \"matchExpressions\" $list_aggregate }}\n              {{- end }}\n              {{- $_ := set $context.Values \"__label_list\" $other_labels }}\n\n              {{/* Do not schedule to any other specified hosts */}}\n              {{- range $type, $type_data := $val }}\n                {{- if eq $type \"hosts\" }}\n                  {{- range $host_data := . }}\n                    {{- $label_dict := dict }}\n\n                    {{- $_ := set $label_dict \"key\" \"kubernetes.io/hostname\" }}\n                    {{- $_ := set $label_dict \"operator\" \"NotIn\" }}\n\n                    {{- $values_list := list $host_data.name }}\n                    {{- $_ := set $label_dict \"values\" $values_list }}\n\n                    {{- $list_aggregate := append $context.Values.__current_label.matchExpressions $label_dict }}\n                    {{- $_ := set $context.Values.__current_label \"matchExpressions\" $list_aggregate }}\n                  {{- end }}\n                {{- end }}\n              {{- end }}\n\n              {{/* store completed daemonset entry/info into global list */}}\n              {{- $list_aggregate := append $context.Values.__daemonset_list $context.Values.__current_label }}\n              {{- $_ := set $context.Values \"__daemonset_list\" $list_aggregate }}\n              {{- $_ := unset $context.Values \"__current_label\" }}\n\n            {{- end }}\n          {{- end }}\n        {{- end }}\n\n        {{/* scheduler exceptions for the default daemonset */}}\n        {{- $_ := set 
$context.Values.__default \"matchExpressions\" list }}\n\n        {{- range $type, $type_data := . }}\n          {{/* Do not schedule to other specified labels */}}\n          {{- if eq $type \"labels\" }}\n            {{- range $label_data := . }}\n              {{- $default_dict := omit $label_data.label \"NULL\" }}\n\n              {{- $_ := set $default_dict \"operator\" \"NotIn\" }}\n\n              {{- $list_aggregate := append $context.Values.__default.matchExpressions $default_dict }}\n              {{- $_ := set $context.Values.__default \"matchExpressions\" $list_aggregate }}\n            {{- end }}\n          {{- end }}\n          {{/* Do not schedule to other specified hosts */}}\n          {{- if eq $type \"hosts\" }}\n            {{- range $host_data := . }}\n              {{- $default_dict := dict }}\n\n              {{- $_ := set $default_dict \"key\" \"kubernetes.io/hostname\" }}\n              {{- $_ := set $default_dict \"operator\" \"NotIn\" }}\n\n              {{- $values_list := list $host_data.name }}\n              {{- $_ := set $default_dict \"values\" $values_list }}\n\n              {{- $list_aggregate := append $context.Values.__default.matchExpressions $default_dict }}\n              {{- $_ := set $context.Values.__default \"matchExpressions\" $list_aggregate }}\n            {{- end }}\n          {{- end }}\n        {{- end }}\n      {{- end }}\n    {{- end }}\n  {{- end }}\n\n  {{/* generate the default daemonset */}}\n\n  {{/* set name */}}\n  {{- $_ := set $context.Values.__default \"name\" \"default\" }}\n\n  {{/* no overrides apply, so copy as-is */}}\n  {{- $root_conf_copy1 := omit $context.Values.conf \"overrides\" }}\n  {{- $root_conf_copy2 := dict \"conf\" $root_conf_copy1 }}\n  {{- $context_values := omit $context.Values \"conf\" }}\n  {{- $root_conf_copy3 := mergeOverwrite $context_values $root_conf_copy2 }}\n  {{- $root_conf_copy4 := dict \"Values\" $root_conf_copy3 }}\n  {{- $_ := set $context.Values.__default \"nodeData\" 
$root_conf_copy4 }}\n\n  {{/* add to global list */}}\n  {{- $list_aggregate := append $context.Values.__daemonset_list $context.Values.__default }}\n  {{- $_ := set $context.Values \"__daemonset_list\" $list_aggregate }}\n\n  {{- $_ := set $context.Values \"__last_configmap_name\" $configmap_name }}\n  {{- range $current_dict := $context.Values.__daemonset_list }}\n\n    {{- $context_novalues := omit $context \"Values\" }}\n    {{- $merged_dict := mergeOverwrite $context_novalues $current_dict.nodeData }}\n    {{- $_ := set $current_dict \"nodeData\" $merged_dict }}\n\n    {{/* name needs to be a DNS-1123 compliant name. Ensure lower case */}}\n    {{- $name_format1 := printf (print $daemonset_root_name \"-\" $current_dict.name) | lower }}\n    {{/* labels may contain underscores which would be invalid here, so we replace them with dashes\n    there may be other valid label names which would make for an invalid DNS-1123 name\n    but these will be easier to handle in future with sprig regex* functions\n    (not available in helm 2.5.1) */}}\n    {{- $name_format2 := $name_format1 | replace \"_\" \"-\" | replace \".\" \"-\" }}\n    {{/* To account for the case where the same label is defined multiple times in overrides\n    (but with different label values), we add a sha of the scheduling data to ensure\n    name uniqueness */}}\n    {{- $_ := set $current_dict \"dns_1123_name\" dict }}\n    {{- if hasKey $current_dict \"matchExpressions\" }}\n      {{- $_ := set $current_dict \"dns_1123_name\" (printf (print $name_format2 \"-\" (list $current_dict.matchExpressions $context | include \"ceph.utils.match_exprs_hash\"))) }}\n    {{- else }}\n      {{- $_ := set $current_dict \"dns_1123_name\" $name_format2 }}\n    {{- end }}\n\n    {{/* set daemonset metadata name */}}\n    {{- if not $context.Values.__daemonset_yaml.metadata }}{{- $_ := set $context.Values.__daemonset_yaml \"metadata\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.metadata.name 
}}{{- $_ := set $context.Values.__daemonset_yaml.metadata \"name\" dict }}{{- end }}\n    {{- $_ := set $context.Values.__daemonset_yaml.metadata \"name\" $current_dict.dns_1123_name }}\n\n    {{/* cross-reference configmap name to container volume definitions */}}\n    {{- $_ := set $context.Values \"__volume_list\" list }}\n    {{- range $current_volume := $context.Values.__daemonset_yaml.spec.template.spec.volumes }}\n      {{- $_ := set $context.Values \"__volume\" $current_volume }}\n      {{- if hasKey $context.Values.__volume \"configMap\" }}\n        {{- if eq $context.Values.__volume.configMap.name $context.Values.__last_configmap_name }}\n          {{- $_ := set $context.Values.__volume.configMap \"name\" $current_dict.dns_1123_name }}\n        {{- end }}\n      {{- end }}\n      {{- $updated_list := append $context.Values.__volume_list $context.Values.__volume }}\n      {{- $_ := set $context.Values \"__volume_list\" $updated_list }}\n    {{- end }}\n    {{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec \"volumes\" $context.Values.__volume_list }}\n\n    {{/* populate scheduling restrictions */}}\n    {{- if hasKey $current_dict \"matchExpressions\" }}\n      {{- if not $context.Values.__daemonset_yaml.spec.template.spec }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template \"spec\" dict }}{{- end }}\n      {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec \"affinity\" dict }}{{- end }}\n      {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity \"nodeAffinity\" dict }}{{- end }}\n      {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity 
\"requiredDuringSchedulingIgnoredDuringExecution\" dict }}{{- end }}\n      {{- $match_exprs := dict }}\n      {{- $_ := set $match_exprs \"matchExpressions\" $current_dict.matchExpressions }}\n      {{- $appended_match_expr := list $match_exprs }}\n      {{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution \"nodeSelectorTerms\" $appended_match_expr }}\n    {{- end }}\n\n    {{/* input value hash for current set of values overrides */}}\n    {{- if not $context.Values.__daemonset_yaml.spec }}{{- $_ := set $context.Values.__daemonset_yaml \"spec\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.spec.template }}{{- $_ := set $context.Values.__daemonset_yaml.spec \"template\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.spec.template.metadata }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template \"metadata\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.spec.template.metadata.annotations }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata \"annotations\" dict }}{{- end }}\n    {{- $cmap := list $current_dict.dns_1123_name $current_dict.nodeData | include $configmap_include }}\n    {{- $values_hash := $cmap | quote | sha256sum }}\n    {{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata.annotations \"configmap-etc-hash\" $values_hash }}\n\n    {{/* generate configmap */}}\n---\n{{ $cmap }}\n\n    {{/* generate daemonset yaml */}}\n{{ range $k, $v := index $current_dict.nodeData.Values.conf.storage \"osd\" }}\n---\n{{- $_ := set $context.Values \"__tmpYAML\" dict }}\n\n{{ $dsNodeName := index $context.Values.__daemonset_yaml.metadata \"name\" }}\n{{ $localDsNodeName := print (trunc 54 $current_dict.dns_1123_name) \"-\" (print $dsNodeName $k | quote | sha256sum | trunc 8) }}\n{{- if not $context.Values.__tmpYAML.metadata }}{{- $_ := set $context.Values.__tmpYAML \"metadata\" dict 
}}{{- end }}\n{{- $_ := set $context.Values.__tmpYAML.metadata \"name\" $localDsNodeName }}\n\n{{ $podDataVols := index $context.Values.__daemonset_yaml.spec.template.spec \"volumes\" }}\n{{- $_ := set $context.Values \"__tmpPodVols\" $podDataVols }}\n\n  {{ if eq $v.data.type \"directory\" }}\n    {{ $dataDirVolume := dict \"hostPath\" (dict \"path\" $v.data.location) \"name\" \"data\" }}\n    {{ $newPodDataVols := append $context.Values.__tmpPodVols $dataDirVolume }}\n    {{- $_ := set $context.Values \"__tmpPodVols\" $newPodDataVols }}\n  {{ else }}\n    {{ $dataDirVolume := dict \"emptyDir\" dict \"name\" \"data\" }}\n    {{ $newPodDataVols := append $context.Values.__tmpPodVols $dataDirVolume }}\n    {{- $_ := set $context.Values \"__tmpPodVols\" $newPodDataVols }}\n  {{ end }}\n\n  {{- if ne $v.data.type \"bluestore\" }}\n  {{ if eq $v.journal.type \"directory\" }}\n    {{ $journalDirVolume := dict \"hostPath\" (dict \"path\" $v.journal.location) \"name\" \"journal\" }}\n    {{ $newPodDataVols := append $context.Values.__tmpPodVols $journalDirVolume }}\n    {{- $_ := set $context.Values \"__tmpPodVols\" $newPodDataVols }}\n  {{ else }}\n    {{ $dataDirVolume := dict \"emptyDir\" dict \"name\" \"journal\" }}\n    {{ $newPodDataVols := append $context.Values.__tmpPodVols $dataDirVolume }}\n    {{- $_ := set $context.Values \"__tmpPodVols\" $newPodDataVols }}\n  {{ end }}\n  {{ else }}\n    {{ $dataDirVolume := dict \"emptyDir\" dict \"name\" \"journal\" }}\n    {{ $newPodDataVols := append $context.Values.__tmpPodVols $dataDirVolume }}\n    {{- $_ := set $context.Values \"__tmpPodVols\" $newPodDataVols }}\n  {{- end }}\n\n  {{- if not $context.Values.__tmpYAML.spec }}{{- $_ := set $context.Values.__tmpYAML \"spec\" dict }}{{- end }}\n  {{- if not $context.Values.__tmpYAML.spec.template }}{{- $_ := set $context.Values.__tmpYAML.spec \"template\" dict }}{{- end }}\n  {{- if not $context.Values.__tmpYAML.spec.template.spec }}{{- $_ := set 
$context.Values.__tmpYAML.spec.template \"spec\" dict }}{{- end }}\n  {{- $_ := set $context.Values.__tmpYAML.spec.template.spec \"volumes\" $context.Values.__tmpPodVols }}\n\n  {{- if not $context.Values.__tmpYAML.spec }}{{- $_ := set $context.Values.__tmpYAML \"spec\" dict }}{{- end }}\n  {{- if not $context.Values.__tmpYAML.spec.template }}{{- $_ := set $context.Values.__tmpYAML.spec \"template\" dict }}{{- end }}\n  {{- if not $context.Values.__tmpYAML.spec.template.spec }}{{- $_ := set $context.Values.__tmpYAML.spec.template \"spec\" dict }}{{- end }}\n  {{- if not $context.Values.__tmpYAML.spec.template.spec.containers }}{{- $_ := set $context.Values.__tmpYAML.spec.template.spec \"containers\" list }}{{- end }}\n  {{- if not $context.Values.__tmpYAML.spec.template.spec.initContainers }}{{- $_ := set $context.Values.__tmpYAML.spec.template.spec \"initContainers\" list }}{{- end }}\n\n  {{- $_ := set $context.Values \"__tmpYAMLcontainers\" list }}\n  {{- range $podContainer := $context.Values.__daemonset_yaml.spec.template.spec.containers }}\n    {{- $_ := set $context.Values \"_tmpYAMLcontainer\" $podContainer }}\n    {{- if empty $context.Values._tmpYAMLcontainer.env }}\n    {{- $_ := set $context.Values._tmpYAMLcontainer \"env\" ( list ) }}\n    {{- end }}\n    {{- $tmpcontainerEnv := omit $context.Values._tmpYAMLcontainer \"env\" }}\n    {{- if eq $v.data.type \"bluestore\" }}\n    {{- if and $v.block_db $v.block_wal}}\n    {{ $containerEnv := prepend (prepend (prepend ( prepend ( prepend ( prepend (index $context.Values._tmpYAMLcontainer \"env\") (dict \"name\" \"STORAGE_TYPE\" \"value\" $v.data.type)) (dict \"name\" \"STORAGE_LOCATION\" \"value\" $v.data.location)) (dict \"name\" \"BLOCK_DB\" \"value\" $v.block_db.location)) (dict \"name\" \"BLOCK_DB_SIZE\" \"value\" $v.block_db.size)) (dict \"name\" \"BLOCK_WAL\" \"value\" $v.block_wal.location)) (dict \"name\" \"BLOCK_WAL_SIZE\" \"value\" $v.block_wal.size) }}\n    {{- $_ := set $tmpcontainerEnv \"env\" 
$containerEnv }}\n    {{- else if $v.block_db }}\n    {{ $containerEnv := prepend (prepend ( prepend ( prepend (index $context.Values._tmpYAMLcontainer \"env\") (dict \"name\" \"STORAGE_TYPE\" \"value\" $v.data.type)) (dict \"name\" \"STORAGE_LOCATION\" \"value\" $v.data.location)) (dict \"name\" \"BLOCK_DB\" \"value\" $v.block_db.location)) (dict \"name\" \"BLOCK_DB_SIZE\" \"value\" $v.block_db.size) }}\n    {{- $_ := set $tmpcontainerEnv \"env\" $containerEnv }}\n    {{- else if $v.block_wal }}\n    {{ $containerEnv := prepend (prepend ( prepend ( prepend (index $context.Values._tmpYAMLcontainer \"env\") (dict \"name\" \"STORAGE_TYPE\" \"value\" $v.data.type)) (dict \"name\" \"STORAGE_LOCATION\" \"value\" $v.data.location)) (dict \"name\" \"BLOCK_WAL\" \"value\" $v.block_wal.location)) (dict \"name\" \"BLOCK_WAL_SIZE\" \"value\" $v.block_wal.size) }}\n    {{- $_ := set $tmpcontainerEnv \"env\" $containerEnv }}\n    {{ else }}\n    {{ $containerEnv := prepend (prepend (index $context.Values._tmpYAMLcontainer \"env\") (dict \"name\" \"STORAGE_TYPE\" \"value\" $v.data.type)) (dict \"name\" \"STORAGE_LOCATION\" \"value\" $v.data.location) }}\n    {{- $_ := set $tmpcontainerEnv \"env\" $containerEnv }}\n    {{- end }}\n    {{ else }}\n    {{ $containerEnv := prepend (prepend (prepend ( prepend (index $context.Values._tmpYAMLcontainer \"env\") (dict \"name\" \"STORAGE_TYPE\" \"value\" $v.data.type)) (dict \"name\" \"JOURNAL_TYPE\" \"value\" $v.journal.type)) (dict \"name\" \"STORAGE_LOCATION\" \"value\" $v.data.location)) (dict \"name\" \"JOURNAL_LOCATION\" \"value\" $v.journal.location) }}\n    {{- $_ := set $tmpcontainerEnv \"env\" $containerEnv }}\n    {{- end }}\n    {{- $localInitContainerEnv := omit $context.Values._tmpYAMLcontainer \"env\" }}\n    {{- $_ := set $localInitContainerEnv \"env\" $tmpcontainerEnv.env }}\n    {{ $containerList := append $context.Values.__tmpYAMLcontainers $localInitContainerEnv }}\n    {{ $_ := set $context.Values 
\"__tmpYAMLcontainers\" $containerList }}\n  {{ end }}\n  {{- $_ := set $context.Values.__tmpYAML.spec.template.spec \"containers\" $context.Values.__tmpYAMLcontainers }}\n\n  {{- $_ := set $context.Values \"__tmpYAMLinitContainers\" list }}\n  {{- range $podContainer := $context.Values.__daemonset_yaml.spec.template.spec.initContainers }}\n    {{- $_ := set $context.Values \"_tmpYAMLinitContainer\" $podContainer }}\n    {{- $tmpinitcontainerEnv := omit $context.Values._tmpYAMLinitContainer \"env\" }}\n    {{- if eq $v.data.type \"bluestore\" }}\n    {{- if and $v.block_db $v.block_wal}}\n    {{ $initcontainerEnv := prepend (prepend (prepend ( prepend ( prepend ( prepend (index $context.Values._tmpYAMLinitContainer \"env\") (dict \"name\" \"STORAGE_TYPE\" \"value\" $v.data.type)) (dict \"name\" \"STORAGE_LOCATION\" \"value\" $v.data.location)) (dict \"name\" \"BLOCK_DB\" \"value\" $v.block_db.location)) (dict \"name\" \"BLOCK_DB_SIZE\" \"value\" $v.block_db.size)) (dict \"name\" \"BLOCK_WAL\" \"value\" $v.block_wal.location)) (dict \"name\" \"BLOCK_WAL_SIZE\" \"value\" $v.block_wal.size) }}\n    {{- $_ := set $tmpinitcontainerEnv \"env\" $initcontainerEnv }}\n    {{- else if $v.block_db }}\n    {{ $initcontainerEnv := prepend (prepend ( prepend ( prepend (index $context.Values._tmpYAMLinitContainer \"env\") (dict \"name\" \"STORAGE_TYPE\" \"value\" $v.data.type)) (dict \"name\" \"STORAGE_LOCATION\" \"value\" $v.data.location)) (dict \"name\" \"BLOCK_DB\" \"value\" $v.block_db.location)) (dict \"name\" \"BLOCK_DB_SIZE\" \"value\" $v.block_db.size) }}\n    {{- $_ := set $tmpinitcontainerEnv \"env\" $initcontainerEnv }}\n    {{- else if $v.block_wal }}\n    {{ $initcontainerEnv := prepend (prepend ( prepend ( prepend (index $context.Values._tmpYAMLinitContainer \"env\") (dict \"name\" \"STORAGE_TYPE\" \"value\" $v.data.type)) (dict \"name\" \"STORAGE_LOCATION\" \"value\" $v.data.location)) (dict \"name\" \"BLOCK_WAL\" \"value\" $v.block_wal.location)) (dict \"name\" 
\"BLOCK_WAL_SIZE\" \"value\" $v.block_wal.size) }}\n    {{- $_ := set $tmpinitcontainerEnv \"env\" $initcontainerEnv }}\n    {{ else }}\n    {{ $initcontainerEnv := prepend (prepend (index $context.Values._tmpYAMLinitContainer \"env\") (dict \"name\" \"STORAGE_TYPE\" \"value\" $v.data.type)) (dict \"name\" \"STORAGE_LOCATION\" \"value\" $v.data.location) }}\n    {{- $_ := set $tmpinitcontainerEnv \"env\" $initcontainerEnv }}\n    {{- end }}\n    {{ else }}\n    {{ $initcontainerEnv := prepend (prepend (prepend ( prepend (index $context.Values._tmpYAMLinitContainer \"env\") (dict \"name\" \"STORAGE_TYPE\" \"value\" $v.data.type)) (dict \"name\" \"JOURNAL_TYPE\" \"value\" $v.journal.type)) (dict \"name\" \"STORAGE_LOCATION\" \"value\" $v.data.location)) (dict \"name\" \"JOURNAL_LOCATION\" \"value\" $v.journal.location) }}\n    {{- $_ := set $tmpinitcontainerEnv \"env\" $initcontainerEnv }}\n    {{- end }}\n    {{- $localInitContainerEnv := omit $context.Values._tmpYAMLinitContainer \"env\" }}\n    {{- $_ := set $localInitContainerEnv \"env\" $tmpinitcontainerEnv.env }}\n    {{ $initContainerList := append $context.Values.__tmpYAMLinitContainers $localInitContainerEnv }}\n    {{ $_ := set $context.Values \"__tmpYAMLinitContainers\" $initContainerList }}\n  {{ end }}\n  {{- $_ := set $context.Values.__tmpYAML.spec.template.spec \"initContainers\" $context.Values.__tmpYAMLinitContainers }}\n\n  {{- $_ := set $context.Values.__tmpYAML.spec.template.spec \"volumes\" $context.Values.__tmpPodVols }}\n\n{{ merge $context.Values.__tmpYAML $context.Values.__daemonset_yaml | toYaml }}\n\n{{ end }}\n\n---\n    {{- $_ := set $context.Values \"__last_configmap_name\" $current_dict.dns_1123_name }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "ceph-osd/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for ceph-osd.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    ceph_osd: 'quay.io/airshipit/ceph-daemon:ubuntu_jammy_20.2.1-1-20260407'\n    ceph_bootstrap: 'quay.io/airshipit/ceph-daemon:ubuntu_jammy_20.2.1-1-20260407'\n    ceph_config_helper: 'quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407'\n    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy'\n    image_repo_sync: 'quay.io/airshipit/docker:27.5.0'\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  osd:\n    node_selector_key: ceph-osd\n    node_selector_value: enabled\n\n# The default deploy tool is ceph-volume. 
\"ceph-disk\" was finally removed as it\n# had been deprecated from Nautilus and was not being used.\ndeploy:\n  tool: \"ceph-volume\"\n# NOTE: set this to 1 if osd disk needs wiping in case of reusing from previous deployment\n  osd_force_repair: 1\n\npod:\n  security_context:\n    osd:\n      pod:\n        runAsUser: 65534\n      container:\n        ceph_init_dirs:\n          runAsUser: 0\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        ceph_log_ownership:\n          runAsUser: 0\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        osd_init:\n          runAsUser: 0\n          privileged: true\n          readOnlyRootFilesystem: true\n        osd_pod:\n          runAsUser: 0\n          privileged: true\n          readOnlyRootFilesystem: true\n        log_runner:\n          # run as \"ceph\" user\n          runAsUser: 64045\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    bootstrap:\n      pod:\n        runAsUser: 65534\n      container:\n        ceph_osd_bootstrap:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    post_apply:\n      pod:\n        runAsUser: 65534\n      container:\n        ceph_osd_post_apply:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    test:\n      pod:\n        runAsUser: 65534\n      container:\n        ceph_cluster_helm_test:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  dns_policy: \"ClusterFirstWithHostNet\"\n  lifecycle:\n    upgrades:\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        osd:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  
resources:\n    enabled: false\n    osd:\n      requests:\n        memory: \"2Gi\"\n        cpu: \"1000m\"\n      limits:\n        memory: \"5Gi\"\n        cpu: \"2000m\"\n    tests:\n      requests:\n        memory: \"10Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"50Mi\"\n        cpu: \"500m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  probes:\n    ceph-osd:\n      ceph-osd:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 60\n            periodSeconds: 60\n            timeoutSeconds: 5\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 120\n            periodSeconds: 60\n            timeoutSeconds: 5\n\nsecrets:\n  keyrings:\n    osd: ceph-bootstrap-osd-keyring\n    admin: ceph-client-admin-keyring\n  oci_image_registry:\n    ceph-osd: ceph-osh-oci-image-registry-key\n\nnetwork:\n  public: 192.168.0.0/16\n  cluster: 192.168.0.0/16\n\njobs:\n  ceph_defragosds:\n    # Execute the 1st of each month\n    cron: \"0 0 1 * *\"\n    history:\n      # Number of successful job to keep\n      successJob: 1\n      # Number of failed job to keep\n      failJob: 1\n    concurrency:\n      # Skip new job if previous job still active\n      execPolicy: Forbid\n    startingDeadlineSecs: 60\n\nconf:\n  ceph:\n    global:\n      # auth\n      cephx: true\n      cephx_require_signatures: false\n      cephx_cluster_require_signatures: true\n      cephx_service_require_signatures: false\n      objecter_inflight_op_bytes: \"1073741824\"\n      objecter_inflight_ops: 10240\n      debug_ms: \"0/0\"\n      mon_osd_down_out_interval: 1800\n      mon_osd_down_out_subtree_limit: root\n      mon_osd_min_in_ratio: 0\n      mon_osd_min_up_ratio: 0\n    osd:\n      osd_mkfs_type: xfs\n      osd_mkfs_options_xfs: -f -i size=2048\n      
osd_max_object_name_len: 256\n      ms_bind_port_min: 6800\n      ms_bind_port_max: 7100\n      osd_snap_trim_priority: 1\n      osd_snap_trim_sleep: 0.1\n      osd_pg_max_concurrent_snap_trims: 1\n      filestore_merge_threshold: -10\n      filestore_split_multiple: 12\n      filestore_max_sync_interval: 10\n      osd_scrub_begin_hour: 22\n      osd_scrub_end_hour: 4\n      osd_scrub_during_recovery: false\n      osd_scrub_sleep: 0.1\n      osd_scrub_chunk_min: 1\n      osd_scrub_chunk_max: 4\n      osd_scrub_load_threshold: 10.0\n      osd_deep_scrub_stride: \"1048576\"\n      osd_scrub_priority: 1\n      osd_recovery_op_priority: 1\n      osd_recovery_max_active: 1\n      osd_mount_options_xfs: \"rw,noatime,largeio,inode64,swalloc,logbufs=8,logbsize=256k,allocsize=4M\"\n      osd_journal_size: 10240\n      osd_crush_update_on_start: false\n      bluestore_elastic_shared_blobs: false\n    target:\n      # This is just for helm tests to proceed the deployment if we have mentioned % of\n      # osds are up and running.\n      required_percent_of_osds: 75\n\n  storage:\n    # NOTE(supamatt): By default use host based buckets for failure domains. Any `failure_domain` defined must\n    # match the failure domain used on your CRUSH rules for pools. For example with a crush rule of\n    # rack_replicated_rule you would specify \"rack\" as the `failure_domain` to use.\n    # `failure_domain`: Set the CRUSH bucket type for your OSD to reside in. 
See the supported CRUSH configuration\n    #  as listed here: http://docs.ceph.com/docs/nautilus/rados/operations/crush-map/\n    #  if failure domain is rack then it will check for node label \"rack\" and get the value from it to create the rack, if there\n    #  is no label rack then it will use following options.\n    # `failure_domain_by_hostname`: Specify the portion of the hostname to use for your failure domain bucket name.\n    # `failure_domain_by_hostname_map`: Explicit mapping of hostname to failure domain, as a simpler alternative to overrides.\n    # `failure_domain_name`: Manually name the failure domain bucket name. This configuration option should only be used\n    #  when using host based overrides.\n    failure_domain: \"host\"\n    failure_domain_by_hostname: \"false\"\n    failure_domain_by_hostname_map: {}\n    # Example:\n    #   failure_domain_map_hostname_map:\n    #     hostfoo: rack1\n    #     hostbar: rack1\n    #     hostbaz: rack2\n    #     hostqux: rack2\n    failure_domain_name: \"false\"\n\n    # Note: You can override the device class by adding the value (e.g., hdd, ssd or nvme).\n    # Leave it empty if you don't need to modify the device class.\n    device_class: \"\"\n\n    # NOTE(portdirect): for homogeneous clusters the `osd` key can be used to\n    # define OSD pods that will be deployed across the cluster.\n    # when specifying whole disk (/dev/sdf) for journals, ceph-osd chart will create\n    # needed partitions for each OSD.\n    osd:\n    # Below is the current configuration default, which is Bluestore with co-located metadata\n    # - data:\n    #     type: bluestore\n    #     location: /dev/sdb   # Use a valid device here\n\n    # Separate block devices may be used for block.db and/or block.wal\n    # Specify the location and size in Gb. It is recommended that the\n    # block_db size isn't smaller than 4% of block. 
For example, if the\n    # block size is 1TB, then block_db shouldn't be less than 40GB.\n    # A size suffix of K for kilobytes, M for megabytes, G for gigabytes,\n    # T for terabytes, P for petabytes or E for exabytes is optional.\n    # Default unit is megabytes.\n    #   block_db:\n    #     location: /dev/sdc\n    #     size: \"96GB\"\n    #   block_wal:\n    #     location: /dev/sdc\n    #     size: \"2GB\"\n\n    # Block-based Filestore OSDs with separate journal block devices\n    # - data:\n    #     type: block-logical\n    #     location: /dev/sdd\n    #   journal:\n    #     type: block-logical\n    #     location: /dev/sdf1\n    # - data:\n    #     type: block-logical\n    #     location: /dev/sde\n    #   journal:\n    #     type: block-logical\n    #     location: /dev/sdf2\n\n    # Block-based Filestore OSDs with directory-based journals\n    # - data:\n    #     type: block-logical\n    #     location: /dev/sdg\n    #   journal:\n    #     type: directory\n    #     location: /var/lib/openstack-helm/ceph/osd/journal-sdg\n\n    # Directory-based Filestore OSD\n    # - data:\n    #     type: directory\n    #     location: /var/lib/openstack-helm/ceph/osd/osd-one\n    #   journal:\n    #     type: directory\n    #     location: /var/lib/openstack-helm/ceph/osd/journal-one\n\n    # The post-apply job will restart OSDs without disruption by default. Set\n    # this value to \"true\" to restart all OSDs at once. This will accomplish\n    # OSD restarts more quickly with disruption.\n    disruptive_osd_restart: \"false\"\n\n    # The post-apply job will try to determine if OSDs need to be restarted and\n    # only restart them if necessary. 
Set this value to \"true\" to restart OSDs\n    # unconditionally.\n    unconditional_osd_restart: \"false\"\n\n# NOTE(portdirect): for heterogeneous clusters the overrides section can be used to define\n# OSD pods that will be deployed upon specific nodes.\n# overrides:\n#   ceph_osd:\n#     hosts:\n#       - name: host1.fqdn\n#         conf:\n#           storage:\n#             failure_domain_name: \"rack1\"\n#             osd:\n#               - data:\n#                   type: directory\n#                   location: /var/lib/openstack-helm/ceph/osd/data-three\n#                 journal:\n#                   type: directory\n#                   location: /var/lib/openstack-helm/ceph/osd/journal-three\n\ndaemonset:\n  prefix_name: \"osd\"\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - ceph-osd-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    osd:\n      jobs:\n        - ceph-storage-keys-generator\n        - ceph-osd-keyring-generator\n      services:\n        - endpoint: internal\n          service: ceph_mon\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    tests:\n      jobs:\n        - ceph-storage-keys-generator\n        - ceph-osd-keyring-generator\n      services:\n        - endpoint: internal\n          service: ceph_mon\n\nlogging:\n  truncate:\n    size: 0\n    period: 3600\n  osd_id:\n    timeout: 300\n\nbootstrap:\n  enabled: true\n  script: |\n    ceph -s\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n   
 auth:\n      enabled: false\n      ceph-osd:\n        username: ceph-osd\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  ceph_mon:\n    namespace: null\n    hosts:\n      default: ceph-mon\n      discovery: ceph-mon-discovery\n    host_fqdn_override:\n      default: null\n    port:\n      mon:\n        default: 6789\n      mon_msgr2:\n        default: 3300\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  configmap_test_bin: true\n  daemonset_osd: true\n  job_bootstrap: false\n  job_post_apply: true\n  job_image_repo_sync: true\n  helm_tests: true\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "ceph-provisioners/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Ceph Provisioner\nname: ceph-provisioners\nversion: 2025.2.0\nhome: https://github.com/ceph/ceph\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "ceph-provisioners/crds/snapshot.storage.k8s.io_volumesnapshotclasses.yaml",
    "content": "---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  annotations:\n    controller-gen.kubebuilder.io/version: v0.8.0\n    api-approved.kubernetes.io: \"https://github.com/kubernetes-csi/external-snapshotter/pull/665\"\n  creationTimestamp: null\n  name: volumesnapshotclasses.snapshot.storage.k8s.io\nspec:\n  group: snapshot.storage.k8s.io\n  names:\n    kind: VolumeSnapshotClass\n    listKind: VolumeSnapshotClassList\n    plural: volumesnapshotclasses\n    shortNames:\n    - vsclass\n    - vsclasses\n    singular: volumesnapshotclass\n  scope: Cluster\n  versions:\n  - additionalPrinterColumns:\n    - jsonPath: .driver\n      name: Driver\n      type: string\n    - description: Determines whether a VolumeSnapshotContent created through the\n        VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.\n      jsonPath: .deletionPolicy\n      name: DeletionPolicy\n      type: string\n    - jsonPath: .metadata.creationTimestamp\n      name: Age\n      type: date\n    name: v1\n    schema:\n      openAPIV3Schema:\n        description: VolumeSnapshotClass specifies parameters that a underlying storage\n          system uses when creating a volume snapshot. A specific VolumeSnapshotClass\n          is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses\n          are non-namespaced\n        properties:\n          apiVersion:\n            description: 'APIVersion defines the versioned schema of this representation\n              of an object. Servers should convert recognized schemas to the latest\n              internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n            type: string\n          deletionPolicy:\n            description: deletionPolicy determines whether a VolumeSnapshotContent\n              created through the VolumeSnapshotClass should be deleted when its bound\n              VolumeSnapshot is deleted. Supported values are \"Retain\" and \"Delete\".\n              \"Retain\" means that the VolumeSnapshotContent and its physical snapshot\n              on underlying storage system are kept. \"Delete\" means that the VolumeSnapshotContent\n              and its physical snapshot on underlying storage system are deleted.\n              Required.\n            enum:\n            - Delete\n            - Retain\n            type: string\n          driver:\n            description: driver is the name of the storage driver that handles this\n              VolumeSnapshotClass. Required.\n            type: string\n          kind:\n            description: 'Kind is a string value representing the REST resource this\n              object represents. Servers may infer this from the endpoint the client\n              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n            type: string\n          parameters:\n            additionalProperties:\n              type: string\n            description: parameters is a key-value map with storage driver specific\n              parameters for creating snapshots. These values are opaque to Kubernetes.\n            type: object\n        required:\n        - deletionPolicy\n        - driver\n        type: object\n    served: true\n    storage: true\n    subresources: {}\nstatus:\n  acceptedNames:\n    kind: \"\"\n    plural: \"\"\n  conditions: []\n  storedVersions: []\n...\n"
  },
  {
    "path": "ceph-provisioners/crds/snapshot.storage.k8s.io_volumesnapshotcontents.yaml",
    "content": "---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  annotations:\n    controller-gen.kubebuilder.io/version: v0.8.0\n    api-approved.kubernetes.io: \"https://github.com/kubernetes-csi/external-snapshotter/pull/665\"\n  creationTimestamp: null\n  name: volumesnapshotcontents.snapshot.storage.k8s.io\nspec:\n  group: snapshot.storage.k8s.io\n  names:\n    kind: VolumeSnapshotContent\n    listKind: VolumeSnapshotContentList\n    plural: volumesnapshotcontents\n    shortNames:\n    - vsc\n    - vscs\n    singular: volumesnapshotcontent\n  scope: Cluster\n  versions:\n  - additionalPrinterColumns:\n    - description: Indicates if the snapshot is ready to be used to restore a volume.\n      jsonPath: .status.readyToUse\n      name: ReadyToUse\n      type: boolean\n    - description: Represents the complete size of the snapshot in bytes\n      jsonPath: .status.restoreSize\n      name: RestoreSize\n      type: integer\n    - description: Determines whether this VolumeSnapshotContent and its physical\n        snapshot on the underlying storage system should be deleted when its bound\n        VolumeSnapshot is deleted.\n      jsonPath: .spec.deletionPolicy\n      name: DeletionPolicy\n      type: string\n    - description: Name of the CSI driver used to create the physical snapshot on\n        the underlying storage system.\n      jsonPath: .spec.driver\n      name: Driver\n      type: string\n    - description: Name of the VolumeSnapshotClass to which this snapshot belongs.\n      jsonPath: .spec.volumeSnapshotClassName\n      name: VolumeSnapshotClass\n      type: string\n    - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent\n        object is bound.\n      jsonPath: .spec.volumeSnapshotRef.name\n      name: VolumeSnapshot\n      type: string\n    - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.\n      jsonPath: 
.spec.volumeSnapshotRef.namespace\n      name: VolumeSnapshotNamespace\n      type: string\n    - jsonPath: .metadata.creationTimestamp\n      name: Age\n      type: date\n    name: v1\n    schema:\n      openAPIV3Schema:\n        description: VolumeSnapshotContent represents the actual \"on-disk\" snapshot\n          object in the underlying storage system\n        properties:\n          apiVersion:\n            description: 'APIVersion defines the versioned schema of this representation\n              of an object. Servers should convert recognized schemas to the latest\n              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n            type: string\n          kind:\n            description: 'Kind is a string value representing the REST resource this\n              object represents. Servers may infer this from the endpoint the client\n              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n            type: string\n          spec:\n            description: spec defines properties of a VolumeSnapshotContent created\n              by the underlying storage system. Required.\n            properties:\n              deletionPolicy:\n                description: deletionPolicy determines whether this VolumeSnapshotContent\n                  and its physical snapshot on the underlying storage system should\n                  be deleted when its bound VolumeSnapshot is deleted. Supported values\n                  are \"Retain\" and \"Delete\". \"Retain\" means that the VolumeSnapshotContent\n                  and its physical snapshot on underlying storage system are kept.\n                  \"Delete\" means that the VolumeSnapshotContent and its physical snapshot\n                  on underlying storage system are deleted. 
For dynamically provisioned\n                  snapshots, this field will automatically be filled in by the CSI\n                  snapshotter sidecar with the \"DeletionPolicy\" field defined in the\n                  corresponding VolumeSnapshotClass. For pre-existing snapshots, users\n                  MUST specify this field when creating the VolumeSnapshotContent\n                  object. Required.\n                enum:\n                - Delete\n                - Retain\n                type: string\n              driver:\n                description: driver is the name of the CSI driver used to create the\n                  physical snapshot on the underlying storage system. This MUST be\n                  the same as the name returned by the CSI GetPluginName() call for\n                  that driver. Required.\n                type: string\n              source:\n                description: source specifies whether the snapshot is (or should be)\n                  dynamically provisioned or already exists, and just requires a Kubernetes\n                  object representation. This field is immutable after creation. Required.\n                properties:\n                  snapshotHandle:\n                    description: snapshotHandle specifies the CSI \"snapshot_id\" of\n                      a pre-existing snapshot on the underlying storage system for\n                      which a Kubernetes object representation was (or should be)\n                      created. 
This field is immutable.\n                    type: string\n                  volumeHandle:\n                    description: volumeHandle specifies the CSI \"volume_id\" of the\n                      volume from which a snapshot should be dynamically taken from.\n                      This field is immutable.\n                    type: string\n                type: object\n                oneOf:\n                - required: [\"snapshotHandle\"]\n                - required: [\"volumeHandle\"]\n              sourceVolumeMode:\n                description: SourceVolumeMode is the mode of the volume whose snapshot\n                  is taken. Can be either “Filesystem” or “Block”. If not specified,\n                  it indicates the source volume's mode is unknown. This field is\n                  immutable. This field is an alpha field.\n                type: string\n              volumeSnapshotClassName:\n                description: name of the VolumeSnapshotClass from which this snapshot\n                  was (or will be) created. Note that after provisioning, the VolumeSnapshotClass\n                  may be deleted or recreated with different set of values, and as\n                  such, should not be referenced post-snapshot creation.\n                type: string\n              volumeSnapshotRef:\n                description: volumeSnapshotRef specifies the VolumeSnapshot object\n                  to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName\n                  field must reference to this VolumeSnapshotContent's name for the\n                  bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent\n                  object, name and namespace of the VolumeSnapshot object MUST be\n                  provided for binding to happen. 
This field is immutable after creation.\n                  Required.\n                properties:\n                  apiVersion:\n                    description: API version of the referent.\n                    type: string\n                  fieldPath:\n                    description: 'If referring to a piece of an object instead of\n                      an entire object, this string should contain a valid JSON/Go\n                      field access statement, such as desiredState.manifest.containers[2].\n                      For example, if the object reference is to a container within\n                      a pod, this would take on a value like: \"spec.containers{name}\"\n                      (where \"name\" refers to the name of the container that triggered\n                      the event) or if no container name is specified \"spec.containers[2]\"\n                      (container with index 2 in this pod). This syntax is chosen\n                      only to have some well-defined way of referencing a part of\n                      an object. TODO: this design is not final and this field is\n                      subject to change in the future.'\n                    type: string\n                  kind:\n                    description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n                    type: string\n                  name:\n                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'\n                    type: string\n                  namespace:\n                    description: 'Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'\n                    type: string\n                  resourceVersion:\n                    description: 'Specific resourceVersion to which this reference\n                      is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'\n                    type: string\n                  uid:\n                    description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'\n                    type: string\n                type: object\n            required:\n            - deletionPolicy\n            - driver\n            - source\n            - volumeSnapshotRef\n            type: object\n          status:\n            description: status represents the current information of a snapshot.\n            properties:\n              creationTime:\n                description: creationTime is the timestamp when the point-in-time\n                  snapshot is taken by the underlying storage system. In dynamic snapshot\n                  creation case, this field will be filled in by the CSI snapshotter\n                  sidecar with the \"creation_time\" value returned from CSI \"CreateSnapshot\"\n                  gRPC call. For a pre-existing snapshot, this field will be filled\n                  with the \"creation_time\" value returned from the CSI \"ListSnapshots\"\n                  gRPC call if the driver supports it. If not specified, it indicates\n                  the creation time is unknown. The format of this field is a Unix\n                  nanoseconds time encoded as an int64. 
On Unix, the command `date\n                  +%s%N` returns the current time in nanoseconds since 1970-01-01\n                  00:00:00 UTC.\n                format: int64\n                type: integer\n              error:\n                description: error is the last observed error during snapshot creation,\n                  if any. Upon success after retry, this error field will be cleared.\n                properties:\n                  message:\n                    description: 'message is a string detailing the encountered error\n                      during snapshot creation if specified. NOTE: message may be\n                      logged, and it should not contain sensitive information.'\n                    type: string\n                  time:\n                    description: time is the timestamp when the error was encountered.\n                    format: date-time\n                    type: string\n                type: object\n              readyToUse:\n                description: readyToUse indicates if a snapshot is ready to be used\n                  to restore a volume. In dynamic snapshot creation case, this field\n                  will be filled in by the CSI snapshotter sidecar with the \"ready_to_use\"\n                  value returned from CSI \"CreateSnapshot\" gRPC call. For a pre-existing\n                  snapshot, this field will be filled with the \"ready_to_use\" value\n                  returned from the CSI \"ListSnapshots\" gRPC call if the driver supports\n                  it, otherwise, this field will be set to \"True\". If not specified,\n                  it means the readiness of a snapshot is unknown.\n                type: boolean\n              restoreSize:\n                description: restoreSize represents the complete size of the snapshot\n                  in bytes. 
In dynamic snapshot creation case, this field will be\n                  filled in by the CSI snapshotter sidecar with the \"size_bytes\" value\n                  returned from CSI \"CreateSnapshot\" gRPC call. For a pre-existing\n                  snapshot, this field will be filled with the \"size_bytes\" value\n                  returned from the CSI \"ListSnapshots\" gRPC call if the driver supports\n                  it. When restoring a volume from this snapshot, the size of the\n                  volume MUST NOT be smaller than the restoreSize if it is specified,\n                  otherwise the restoration will fail. If not specified, it indicates\n                  that the size is unknown.\n                format: int64\n                minimum: 0\n                type: integer\n              snapshotHandle:\n                description: snapshotHandle is the CSI \"snapshot_id\" of a snapshot\n                  on the underlying storage system. If not specified, it indicates\n                  that dynamic snapshot creation has either failed or it is still\n                  in progress.\n                type: string\n            type: object\n        required:\n        - spec\n        type: object\n    served: true\n    storage: true\n    subresources:\n      status: {}\nstatus:\n  acceptedNames:\n    kind: \"\"\n    plural: \"\"\n  conditions: []\n  storedVersions: []\n...\n"
  },
  {
    "path": "ceph-provisioners/crds/snapshot.storage.k8s.io_volumesnapshots.yaml",
    "content": "---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  annotations:\n    controller-gen.kubebuilder.io/version: v0.8.0\n    api-approved.kubernetes.io: \"https://github.com/kubernetes-csi/external-snapshotter/pull/665\"\n  creationTimestamp: null\n  name: volumesnapshots.snapshot.storage.k8s.io\nspec:\n  group: snapshot.storage.k8s.io\n  names:\n    kind: VolumeSnapshot\n    listKind: VolumeSnapshotList\n    plural: volumesnapshots\n    shortNames:\n    - vs\n    singular: volumesnapshot\n  scope: Namespaced\n  versions:\n  - additionalPrinterColumns:\n    - description: Indicates if the snapshot is ready to be used to restore a volume.\n      jsonPath: .status.readyToUse\n      name: ReadyToUse\n      type: boolean\n    - description: If a new snapshot needs to be created, this contains the name of\n        the source PVC from which this snapshot was (or will be) created.\n      jsonPath: .spec.source.persistentVolumeClaimName\n      name: SourcePVC\n      type: string\n    - description: If a snapshot already exists, this contains the name of the existing\n        VolumeSnapshotContent object representing the existing snapshot.\n      jsonPath: .spec.source.volumeSnapshotContentName\n      name: SourceSnapshotContent\n      type: string\n    - description: Represents the minimum size of volume required to rehydrate from\n        this snapshot.\n      jsonPath: .status.restoreSize\n      name: RestoreSize\n      type: string\n    - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot.\n      jsonPath: .spec.volumeSnapshotClassName\n      name: SnapshotClass\n      type: string\n    - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot\n        object intends to bind to. Please note that verification of binding actually\n        requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure\n        both are pointing at each other. 
Binding MUST be verified prior to usage of\n        this object.\n      jsonPath: .status.boundVolumeSnapshotContentName\n      name: SnapshotContent\n      type: string\n    - description: Timestamp when the point-in-time snapshot was taken by the underlying\n        storage system.\n      jsonPath: .status.creationTime\n      name: CreationTime\n      type: date\n    - jsonPath: .metadata.creationTimestamp\n      name: Age\n      type: date\n    name: v1\n    schema:\n      openAPIV3Schema:\n        description: VolumeSnapshot is a user's request for either creating a point-in-time\n          snapshot of a persistent volume, or binding to a pre-existing snapshot.\n        properties:\n          apiVersion:\n            description: 'APIVersion defines the versioned schema of this representation\n              of an object. Servers should convert recognized schemas to the latest\n              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n            type: string\n          kind:\n            description: 'Kind is a string value representing the REST resource this\n              object represents. Servers may infer this from the endpoint the client\n              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n            type: string\n          spec:\n            description: 'spec defines the desired characteristics of a snapshot requested\n              by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots\n              Required.'\n            properties:\n              source:\n                description: source specifies where a snapshot will be created from.\n                  This field is immutable after creation. 
Required.\n                properties:\n                  persistentVolumeClaimName:\n                    description: persistentVolumeClaimName specifies the name of the\n                      PersistentVolumeClaim object representing the volume from which\n                      a snapshot should be created. This PVC is assumed to be in the\n                      same namespace as the VolumeSnapshot object. This field should\n                      be set if the snapshot does not exists, and needs to be created.\n                      This field is immutable.\n                    type: string\n                  volumeSnapshotContentName:\n                    description: volumeSnapshotContentName specifies the name of a\n                      pre-existing VolumeSnapshotContent object representing an existing\n                      volume snapshot. This field should be set if the snapshot already\n                      exists and only needs a representation in Kubernetes. This field\n                      is immutable.\n                    type: string\n                type: object\n                oneOf:\n                - required: [\"persistentVolumeClaimName\"]\n                - required: [\"volumeSnapshotContentName\"]\n              volumeSnapshotClassName:\n                description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass\n                  requested by the VolumeSnapshot. VolumeSnapshotClassName may be\n                  left nil to indicate that the default SnapshotClass should be used.\n                  A given cluster may have multiple default Volume SnapshotClasses:\n                  one default per CSI Driver. If a VolumeSnapshot does not specify\n                  a SnapshotClass, VolumeSnapshotSource will be checked to figure\n                  out what the associated CSI Driver is, and the default VolumeSnapshotClass\n                  associated with that CSI Driver will be used. 
If more than one VolumeSnapshotClass\n                  exist for a given CSI Driver and more than one have been marked\n                  as default, CreateSnapshot will fail and generate an event. Empty\n                  string is not allowed for this field.'\n                type: string\n            required:\n            - source\n            type: object\n          status:\n            description: status represents the current information of a snapshot.\n              Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent\n              objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent\n              point at each other) before using this object.\n            properties:\n              boundVolumeSnapshotContentName:\n                description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent\n                  object to which this VolumeSnapshot object intends to bind to. If\n                  not specified, it indicates that the VolumeSnapshot object has not\n                  been successfully bound to a VolumeSnapshotContent object yet. NOTE:\n                  To avoid possible security issues, consumers must verify binding\n                  between VolumeSnapshot and VolumeSnapshotContent objects is successful\n                  (by validating that both VolumeSnapshot and VolumeSnapshotContent\n                  point at each other) before using this object.'\n                type: string\n              creationTime:\n                description: creationTime is the timestamp when the point-in-time\n                  snapshot is taken by the underlying storage system. In dynamic snapshot\n                  creation case, this field will be filled in by the snapshot controller\n                  with the \"creation_time\" value returned from CSI \"CreateSnapshot\"\n                  gRPC call. 
For a pre-existing snapshot, this field will be filled\n                  with the \"creation_time\" value returned from the CSI \"ListSnapshots\"\n                  gRPC call if the driver supports it. If not specified, it may indicate\n                  that the creation time of the snapshot is unknown.\n                format: date-time\n                type: string\n              error:\n                description: error is the last observed error during snapshot creation,\n                  if any. This field could be helpful to upper level controllers(i.e.,\n                  application controller) to decide whether they should continue on\n                  waiting for the snapshot to be created based on the type of error\n                  reported. The snapshot controller will keep retrying when an error\n                  occurs during the snapshot creation. Upon success, this error field\n                  will be cleared.\n                properties:\n                  message:\n                    description: 'message is a string detailing the encountered error\n                      during snapshot creation if specified. NOTE: message may be\n                      logged, and it should not contain sensitive information.'\n                    type: string\n                  time:\n                    description: time is the timestamp when the error was encountered.\n                    format: date-time\n                    type: string\n                type: object\n              readyToUse:\n                description: readyToUse indicates if the snapshot is ready to be used\n                  to restore a volume. In dynamic snapshot creation case, this field\n                  will be filled in by the snapshot controller with the \"ready_to_use\"\n                  value returned from CSI \"CreateSnapshot\" gRPC call. 
For a pre-existing\n                  snapshot, this field will be filled with the \"ready_to_use\" value\n                  returned from the CSI \"ListSnapshots\" gRPC call if the driver supports\n                  it, otherwise, this field will be set to \"True\". If not specified,\n                  it means the readiness of a snapshot is unknown.\n                type: boolean\n              restoreSize:\n                type: string\n                description: restoreSize represents the minimum size of volume required\n                  to create a volume from this snapshot. In dynamic snapshot creation\n                  case, this field will be filled in by the snapshot controller with\n                  the \"size_bytes\" value returned from CSI \"CreateSnapshot\" gRPC call.\n                  For a pre-existing snapshot, this field will be filled with the\n                  \"size_bytes\" value returned from the CSI \"ListSnapshots\" gRPC call\n                  if the driver supports it. When restoring a volume from this snapshot,\n                  the size of the volume MUST NOT be smaller than the restoreSize\n                  if it is specified, otherwise the restoration will fail. If not\n                  specified, it indicates that the size is unknown.\n                pattern: ^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$\n                x-kubernetes-int-or-string: true\n            type: object\n        required:\n        - spec\n        type: object\n    served: true\n    storage: true\n    subresources:\n      status: {}\nstatus:\n  acceptedNames:\n    kind: \"\"\n    plural: \"\"\n  conditions: []\n  storedVersions: []\n...\n"
  },
  {
    "path": "ceph-provisioners/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "ceph-provisioners/templates/bin/_helm-tests.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nfunction reset_test_env()\n{\n  pvc_namespace=$1\n  pod_name=$2\n  pvc_name=$3\n  echo \"--> Resetting POD and PVC before/after test\"\n  if kubectl get pod -n $pvc_namespace $pod_name; then\n    kubectl delete pod -n $pvc_namespace $pod_name\n  fi\n\n  if kubectl get cm -n $pvc_namespace ${pod_name}-bin; then\n    kubectl delete cm -n $pvc_namespace ${pod_name}-bin\n  fi\n\n  if kubectl get pvc -n $pvc_namespace $pvc_name; then\n    kubectl delete pvc -n $pvc_namespace $pvc_name;\n  fi\n}\n\n\nfunction storageclass_validation()\n{\n  pvc_namespace=$1\n  pod_name=$2\n  pvc_name=$3\n  storageclass=$4\n\n  echo \"--> Starting validation\"\n\n  # storageclass check\n  if ! kubectl get storageclass $storageclass; then\n    echo \"Storageclass: $storageclass is not provisioned.\"\n    exit 1\n  fi\n\n  tee <<EOF | kubectl apply -n $pvc_namespace -f -\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: $pvc_name\nspec:\n  accessModes:\n    - ReadWriteOnce\n  storageClassName: $storageclass\n  resources:\n    requests:\n      storage: 3Gi\nEOF\n\n  # waiting for pvc to get created\n  end=$(($(date +%s) + TEST_POD_WAIT_TIMEOUT))\n  while ! 
kubectl get pvc -n $pvc_namespace $pvc_name | grep Bound; do\n    if [ \"$(date +%s)\" -gt \"${end}\" ]; then\n      kubectl get pvc -n $pvc_namespace $pvc_name\n      kubectl get pv\n      echo \"Storageclass is available but can't create PersistentVolumeClaim.\"\n      exit 1\n    fi\n    sleep 10\n  done\n\n  tee <<EOF | kubectl apply --namespace $pvc_namespace -f -\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: ${pod_name}-bin\ndata:\n  test.sh: |\n    #!/bin/bash\n\n    tmpdir=\\$(mktemp -d)\n    declare -a files_list\n    total_files=10\n\n    function check_result ()\n    {\n      red='\\033[0;31m'\n      green='\\033[0;32m'\n      bw='\\033[0m'\n      if [ \"\\$1\" -ne 0 ]; then\n        echo -e \"\\${red}\\$2\\${bw}\"\n        exit 1\n      else\n        echo -e \"\\${green}\\$3\\${bw}\"\n      fi\n    }\n\n    echo \"Preparing \\${total_files} files for test\"\n    for i in \\$(seq \\$total_files); do\n      files_list[\\$i]=\"\\$(mktemp -p \"\\$tmpdir\" -t XXXXXXXX)\"\n      echo \"Creating \\${files_list[\\$i]} file\"\n      dd if=/dev/urandom of=\"\\${files_list[\\$i]}\" bs=1M count=8\n\n      echo \"Writing to /mnt/\\${files_list[\\$i]##*/}\"\n      cp \"\\${files_list[\\$i]}\" \"/mnt/\\${files_list[\\$i]##*/}\"\n      check_result \\$? \"The action failed\" \"The action succeeded\"\n    done\n\n    for i in \\$(seq \\$total_files); do\n      echo \"Comparing files: \\${files_list[\\$i]} and /mnt/\\${files_list[\\$i]##*/}\"\n      cmp \"\\${files_list[\\$i]}\" \"/mnt/\\${files_list[\\$i]##*/}\"\n      check_result \\$? 
\"The files are not equal\" \"The files are equal\"\n    done\n\n    touch /mnt/SUCCESS && exit 0 || exit 1\n\n---\nkind: Pod\napiVersion: v1\nmetadata:\n  name: $pod_name\nspec:\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  containers:\n  - name: task-pv-storage\n    image: {{ .Values.images.tags.ceph_config_helper }}\n    command:\n    - /tmp/test.sh\n    volumeMounts:\n    - name: ceph-cm-test\n      mountPath: /tmp/test.sh\n      subPath: test.sh\n      readOnly: true\n    - name: pvc\n      mountPath: \"/mnt\"\n      readOnly: false\n  restartPolicy: \"Never\"\n  volumes:\n  - name: ceph-cm-test\n    configMap:\n      name: ${pod_name}-bin\n      defaultMode: 0555\n  - name: pvc\n    persistentVolumeClaim:\n      claimName: $pvc_name\n...\nEOF\n\n  # waiting for pod to get completed\n  end=$(($(date +%s) + TEST_POD_WAIT_TIMEOUT))\n  while ! kubectl get pods -n $pvc_namespace $pod_name | grep -i Completed; do\n    if [ \"$(date +%s)\" -gt \"${end}\" ]; then\n      kubectl get pods -n $pvc_namespace $pod_name\n      kubectl logs -n $pvc_namespace $pod_name\n      echo \"Cannot create POD with rbd storage class $storageclass based PersistentVolumeClaim.\"\n      exit 1\n    fi\n    sleep 10\n  done\n\n  kubectl logs -n $pvc_namespace $pod_name\n}\n\n\nreset_test_env $PVC_NAMESPACE $RBD_TEST_POD_NAME $RBD_TEST_PVC_NAME\nreset_test_env $PVC_NAMESPACE $CSI_RBD_TEST_POD_NAME $CSI_RBD_TEST_PVC_NAME\nreset_test_env $PVC_NAMESPACE $CEPHFS_TEST_POD_NAME $CEPHFS_TEST_PVC_NAME\n\n{{- range $storageclass, $val := .Values.storageclass }}\nif [ {{ $val.provisioner }} == \"ceph.com/rbd\" ] && [ {{ $val.provision_storage_class }} == true ];\nthen\n  echo \"--> Checking RBD storage class.\"\n  storageclass={{ $val.metadata.name }}\n\n  storageclass_validation $PVC_NAMESPACE $RBD_TEST_POD_NAME $RBD_TEST_PVC_NAME $storageclass\n  reset_test_env $PVC_NAMESPACE $RBD_TEST_POD_NAME $RBD_TEST_PVC_NAME\nfi\n\nif [ {{ 
$val.provisioner }} == \"ceph.rbd.csi.ceph.com\" ] && [ {{ $val.provision_storage_class }} == true ];\nthen\n  echo \"--> Checking CSI RBD storage class.\"\n  storageclass={{ $val.metadata.name }}\n  storageclass_validation $PVC_NAMESPACE $CSI_RBD_TEST_POD_NAME $CSI_RBD_TEST_PVC_NAME $storageclass\n  reset_test_env $PVC_NAMESPACE $CSI_RBD_TEST_POD_NAME $CSI_RBD_TEST_PVC_NAME\nfi\n\nif [ {{ $val.provisioner }} == \"ceph.com/cephfs\" ] && [ {{ $val.provision_storage_class }} == true ];\nthen\n  echo \"--> Checking cephfs storage class.\"\n  storageclass={{ $val.metadata.name }}\n  storageclass_validation $PVC_NAMESPACE $CEPHFS_TEST_POD_NAME $CEPHFS_TEST_PVC_NAME $storageclass\n  reset_test_env $PVC_NAMESPACE $CEPHFS_TEST_POD_NAME $CEPHFS_TEST_PVC_NAME\nfi\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/bin/provisioner/cephfs/_client-key-manager.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{- $envAll := . }}\n\nCEPH_CEPHFS_KEY=$(kubectl get secret ${PVC_CEPH_CEPHFS_STORAGECLASS_ADMIN_SECRET_NAME} \\\n    --namespace=${PVC_CEPH_CEPHFS_STORAGECLASS_DEPLOYED_NAMESPACE} \\\n    -o json )\n\nceph_activate_namespace() {\n  kube_namespace=$1\n  secret_type=$2\n  secret_name=$3\n  ceph_key=$4\n  {\n  cat <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: \"${secret_name}\"\n  labels:\n{{ tuple $envAll \"ceph\" \"cephfs\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\ntype: \"${secret_type}\"\ndata:\n  key: $( echo ${ceph_key} )\nEOF\n  } | kubectl apply --namespace ${kube_namespace} -f -\n}\n\nif ! kubectl get --namespace ${DEPLOYMENT_NAMESPACE} secrets ${PVC_CEPH_CEPHFS_STORAGECLASS_USER_SECRET_NAME}; then\n  ceph_activate_namespace \\\n    ${DEPLOYMENT_NAMESPACE} \\\n    \"kubernetes.io/cephfs\" \\\n    ${PVC_CEPH_CEPHFS_STORAGECLASS_USER_SECRET_NAME} \\\n    \"$(echo ${CEPH_CEPHFS_KEY} | jq -r '.data.key')\"\nfi\n"
  },
  {
    "path": "ceph-provisioners/templates/bin/provisioner/cephfs/_start.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec /usr/local/bin/cephfs-provisioner -id \"${POD_NAME}\"\n"
  },
  {
    "path": "ceph-provisioners/templates/bin/provisioner/rbd/_namespace-client-ceph-config-manager.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{- $envAll := . }}\n\n{{ include \"helm-toolkit.snippets.mon_host_from_k8s_ep\" . }}\n\nENDPOINT=$(mon_host_from_k8s_ep ${PVC_CEPH_RBD_STORAGECLASS_DEPLOYED_NAMESPACE} ceph-mon-discovery)\n\nif [ -z \"$ENDPOINT\" ]; then\n  echo \"Ceph Mon endpoint is empty\"\n  exit 1\nelse\n  echo $ENDPOINT\nfi\n\nkubectl get cm ${CEPH_CONF_ETC} -n  ${DEPLOYMENT_NAMESPACE}  -o yaml | \\\n  sed \"s#mon_host.*#mon_host = ${ENDPOINT}#g\" | \\\n  kubectl apply -f -\n\nkubectl get cm ${CEPH_CONF_ETC} -n  ${DEPLOYMENT_NAMESPACE}  -o yaml\n"
  },
  {
    "path": "ceph-provisioners/templates/bin/provisioner/rbd/_namespace-client-key-cleaner.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nkubectl delete secret \\\n  --namespace ${DEPLOYMENT_NAMESPACE} \\\n  --ignore-not-found=true \\\n  ${PVC_CEPH_RBD_STORAGECLASS_USER_SECRET_NAME}\n"
  },
  {
    "path": "ceph-provisioners/templates/bin/provisioner/rbd/_namespace-client-key-manager.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{- $envAll := . }}\n\nCEPH_RBD_KEY=$(kubectl get secret ${PVC_CEPH_RBD_STORAGECLASS_ADMIN_SECRET_NAME} \\\n    --namespace=${PVC_CEPH_RBD_STORAGECLASS_DEPLOYED_NAMESPACE} \\\n    -o json )\n\nif [[ ${CONNECT_TO_ROOK_CEPH_CLUSTER} == \"true\" ]] ; then\n  CEPH_CLUSTER_KEY=$(echo \"${CEPH_RBD_KEY}\" | jq -r '.data[\"ceph-secret\"]')\nelse\n  CEPH_CLUSTER_KEY=$(echo \"${CEPH_RBD_KEY}\" | jq -r '.data.key')\nfi\n\nceph_activate_namespace() {\n  kube_namespace=$1\n  secret_type=$2\n  secret_name=$3\n  ceph_key=$4\n  {\n  cat <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: \"${secret_name}\"\n  labels:\n{{ tuple $envAll \"ceph\" \"rbd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\ntype: \"${secret_type}\"\ndata:\n  key: $( echo ${ceph_key} )\nEOF\n  } | kubectl apply --namespace ${kube_namespace} -f -\n}\n\nceph_activate_namespace ${DEPLOYMENT_NAMESPACE} \"kubernetes.io/rbd\" ${PVC_CEPH_RBD_STORAGECLASS_USER_SECRET_NAME} \"${CEPH_CLUSTER_KEY}\"\n"
  },
  {
    "path": "ceph-provisioners/templates/bin/provisioner/rbd/_start.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec /usr/local/bin/rbd-provisioner -id \"${POD_NAME}\"\n"
  },
  {
    "path": "ceph-provisioners/templates/configmap-bin-provisioner.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.configmap_bin .Values.deployment.client_secrets }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"ceph-prov-bin-clients\" | quote }}\ndata:\n  provisioner-rbd-namespace-client-ceph-config-manager.sh: |\n{{ tuple \"bin/provisioner/rbd/_namespace-client-ceph-config-manager.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  provisioner-rbd-namespace-client-key-manager.sh: |\n{{ tuple \"bin/provisioner/rbd/_namespace-client-key-manager.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  provisioner-rbd-namespace-client-key-cleaner.sh: |\n{{ tuple \"bin/provisioner/rbd/_namespace-client-key-cleaner.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  helm-tests.sh: |\n{{ tuple \"bin/_helm-tests.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.configmap_bin_common .Values.deployment.ceph }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"ceph-prov-bin\" | quote }}\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n\n  provisioner-cephfs-start.sh: |\n{{ tuple \"bin/provisioner/cephfs/_start.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  provisioner-cephfs-client-key-manager.sh: |\n{{ tuple \"bin/provisioner/cephfs/_client-key-manager.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n\n  provisioner-rbd-start.sh: |\n{{ tuple \"bin/provisioner/rbd/_start.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/configmap-etc-client.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ceph.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if or (.Values.deployment.ceph) (.Values.deployment.client_secrets) }}\n\n{{- if empty .Values.conf.ceph.global.mon_host -}}\n{{- $monHost := tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n{{- $_ := $monHost | set .Values.conf.ceph.global \"mon_host\" -}}\n{{- end -}}\n\n\n{{- if empty .Values.conf.ceph.osd.cluster_network -}}\n{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd \"cluster_network\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceph.osd.public_network -}}\n{{- $_ := .Values.network.public | set .Values.conf.ceph.osd \"public_network\" -}}\n{{- end -}}\n\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ $configMapName }}\ndata:\n  ceph.conf: |\n{{ include \"helm-toolkit.utils.to_ini\" .Values.conf.ceph | indent 4 }}\n\n{{- end }}\n{{- end }}\n{{- end }}\n{{- if .Values.manifests.configmap_etc }}\n{{- if eq .Values.storageclass.csi_rbd.provision_storage_class true }}\n{{- list  .Values.storageclass.csi_rbd.ceph_configmap_name . | include \"ceph.configmap.etc\" }}\n{{- else }}\n{{- list  .Values.storageclass.rbd.ceph_configmap_name . | include \"ceph.configmap.etc\" }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/configmap-etc-csi.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ceph.configmap.etc.csi\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if and (.Values.deployment.ceph) (.Values.deployment.csi_rbd_provisioner) }}\n\n{{- if empty .Values.conf.ceph.global.mon_host -}}\n{{- $monHost := tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n{{- $_ := $monHost | set .Values.conf.ceph.global \"mon_host\" -}}\n{{- end -}}\n\n---\napiVersion: v1\nkind: ConfigMap\ndata:\n  config.json: |-\n    [\n      {\n        \"clusterID\": {{ .Release.Namespace | quote }},\n        \"monitors\": [\n          {{ .Values.conf.ceph.global.mon_host | quote }}\n        ]\n      }\n    ]\nmetadata:\n  name: ceph-csi-config\n{{- end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- list  .Values.storageclass.csi_rbd.ceph_configmap_name . | include \"ceph.configmap.etc.csi\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/daemonset-csi-rbd-plugin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.deployment_csi_rbd_provisioner .Values.deployment.csi_rbd_provisioner }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"ceph-rbd-csi-nodeplugin\" }}\n{{ tuple $envAll \"rbd_provisioner\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"nodes\"]\n    verbs: [\"get\", \"watch\", \"list\"]\n  - apiGroups: [\"\"]\n    resources: [\"persistentvolumes\"]\n    verbs: [\"get\"]\n  - apiGroups: [\"storage.k8s.io\"]\n    resources: [\"volumeattachments\"]\n    verbs: [\"get\", \"watch\", \"list\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n  name: ceph-rbd-plugin\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"rbd\" \"plugin\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    
matchLabels:\n{{ tuple $envAll \"rbd\" \"plugin\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"plugin\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"rbd\" \"plugin\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-rbd-plugin\" \"containerNames\" (list \"driver-registrar\" \"csi-rbdplugin\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"plugin\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.csi_rbd_plugin.node_selector_key }}: {{ .Values.labels.csi_rbd_plugin.node_selector_value }}\n      hostNetwork: true\n      hostPID: true\n      dnsPolicy: {{ .Values.pod.dns_policy }}\n      initContainers:\n{{ tuple $envAll \"rbd_plugin\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: driver-registrar\n{{ tuple $envAll \"csi_registrar\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.rbd_registrar | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"plugin\" \"container\" \"ceph_rbd_registrar\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          args:\n            - \"--v=0\"\n            - \"--csi-address=/csi/csi.sock\"\n            - \"--kubelet-registration-path=/var/lib/kubelet/plugins/$(DEPLOYMENT_NAMESPACE).rbd.csi.ceph.com/csi.sock\"\n          env:\n    
        - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: KUBE_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n          volumeMounts:\n            - name: socket-dir\n              mountPath: /csi\n            - name: registration-dir\n              mountPath: /registration\n        - name: csi-rbdplugin\n{{ tuple $envAll \"cephcsi\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.rbd_cephcsi | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"plugin\" \"container\" \"ceph_csi_rbd_plugin\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          args:\n            - \"--nodeid=$(NODE_ID)\"\n            - \"--type=rbd\"\n            - \"--nodeserver=true\"\n            - \"--endpoint=$(CSI_ENDPOINT)\"\n            - \"--v=0\"\n            - \"--drivername={{ $envAll.Values.storageclass.csi_rbd.provisioner }}\"\n            - \"--pidlimit=-1\"\n          env:\n            - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: POD_IP\n              valueFrom:\n                fieldRef:\n                  fieldPath: status.podIP\n            - name: NODE_ID\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            - name: CSI_ENDPOINT\n              value: unix:///csi/csi.sock\n          volumeMounts:\n            - name: socket-dir\n              mountPath: /csi\n            - mountPath: /dev\n              name: host-dev\n            - mountPath: /sys\n              name: host-sys\n            - mountPath: /run/mount\n              name: host-mount\n            - mountPath: /lib/modules\n            
  name: lib-modules\n              readOnly: true\n            - name: ceph-csi-config\n              mountPath: /etc/ceph-csi-config/\n            - name: plugin-dir\n              mountPath: /var/lib/kubelet/plugins\n              mountPropagation: \"Bidirectional\"\n            - name: mountpoint-dir\n              mountPath: /var/lib/kubelet/pods\n              mountPropagation: \"Bidirectional\"\n            - name: keys-tmp-dir\n              mountPath: /tmp/csi/keys\n      volumes:\n        - name: socket-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins/ceph.rbd.csi.ceph.com\n            type: DirectoryOrCreate\n        - name: plugin-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins\n            type: Directory\n        - name: mountpoint-dir\n          hostPath:\n            path: /var/lib/kubelet/pods\n            type: DirectoryOrCreate\n        - name: registration-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins_registry/\n            type: Directory\n        - name: host-dev\n          hostPath:\n            path: /dev\n        - name: host-sys\n          hostPath:\n            path: /sys\n        - name: host-mount\n          hostPath:\n            path: /run/mount\n        - name: lib-modules\n          hostPath:\n            path: /lib/modules\n        - name: ceph-csi-config\n          configMap:\n            name: ceph-csi-config\n        - name: keys-tmp-dir\n          emptyDir: {\n            medium: \"Memory\"\n          }\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/deployment-csi-rbd-provisioner.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.deployment_csi_rbd_provisioner .Values.deployment.csi_rbd_provisioner }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"ceph-rbd-csi-provisioner\" }}\n{{ tuple $envAll \"rbd_provisioner\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"nodes\"]\n    verbs: [\"get\", \"list\", \"watch\"]\n  - apiGroups: [\"\"]\n    resources: [\"pods\"]\n    verbs: [\"list\", \"watch\"]\n  - apiGroups: [\"\"]\n    resources: [\"secrets\"]\n    verbs: [\"get\", \"list\"]\n  - apiGroups: [\"\"]\n    resources: [\"events\"]\n    verbs: [\"list\", \"watch\", \"create\", \"update\", \"patch\"]\n  - apiGroups: [\"\"]\n    resources: [\"persistentvolumes\"]\n    verbs: [\"get\", \"list\", \"watch\", \"create\", \"update\", \"delete\", \"patch\"]\n  - apiGroups: [\"\"]\n    resources: [\"persistentvolumeclaims\"]\n    verbs: [\"get\", \"list\", \"watch\", \"update\"]\n  - apiGroups: [\"\"]\n    resources: [\"persistentvolumeclaims/status\"]\n    verbs: [\"update\", \"patch\"]\n  - apiGroups: [\"storage.k8s.io\"]\n    resources: [\"storageclasses\"]\n    verbs: [\"get\", \"list\", \"watch\"]\n  - apiGroups: [\"snapshot.storage.k8s.io\"]\n    resources: 
[\"volumesnapshots\"]\n    verbs: [\"get\", \"list\"]\n  - apiGroups: [\"snapshot.storage.k8s.io\"]\n    resources: [\"volumesnapshotcontents\"]\n    verbs: [\"create\", \"get\", \"list\", \"watch\", \"update\", \"delete\"]\n  - apiGroups: [\"snapshot.storage.k8s.io\"]\n    resources: [\"volumesnapshotclasses\"]\n    verbs: [\"get\", \"list\", \"watch\"]\n  - apiGroups: [\"storage.k8s.io\"]\n    resources: [\"volumeattachments\"]\n    verbs: [\"get\", \"list\", \"watch\", \"update\", \"patch\"]\n  - apiGroups: [\"storage.k8s.io\"]\n    resources: [\"volumeattachments/status\"]\n    verbs: [\"update\", \"patch\"]\n  - apiGroups: [\"storage.k8s.io\"]\n    resources: [\"csinodes\"]\n    verbs: [\"get\", \"list\", \"watch\"]\n  - apiGroups: [\"snapshot.storage.k8s.io\"]\n    resources: [\"volumesnapshotcontents/status\"]\n    verbs: [\"update\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}-run-rbd-provisioner\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  namespace: {{ $envAll.Release.Namespace }}\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"configmaps\"]\n    verbs: [\"get\", \"list\", \"watch\", \"create\", \"delete\"]\n  - apiGroups: [\"coordination.k8s.io\"]\n    resources: [\"leases\"]\n    verbs: [\"get\", \"watch\", \"list\", \"delete\", \"update\", \"create\"]\n---\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\n  namespace: {{ $envAll.Release.Namespace }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  kind: Role\n  name: {{ $serviceAccountName }}\n  
apiGroup: rbac.authorization.k8s.io\n---\nkind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: ceph-rbd-csi-provisioner\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"rbd\" \"provisioner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.csi_rbd_provisioner }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"rbd\" \"provisioner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"rbd\" \"provisioner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-rbd-csi-provisioner\" \"containerNames\" (list \"ceph-rbd-provisioner\" \"ceph-rbd-snapshotter\" \"ceph-rbd-attacher\" \"csi-resizer\" \"csi-rbdplugin\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"provisioner\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"rbd\" \"provisioner\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ tuple $envAll \"csi_rbd_provisioner\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n      nodeSelector:\n        {{ .Values.labels.provisioner.node_selector_key }}: {{ .Values.labels.provisioner.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"rbd_provisioner\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n  
      - name: ceph-rbd-provisioner\n{{ tuple $envAll \"csi_provisioner\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.csi_rbd_provisioner | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"provisioner\" \"container\" \"ceph_rbd_provisioner\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: ADDRESS\n              value: unix:///csi/csi-provisioner.sock\n\n          args:\n            - \"--csi-address=$(ADDRESS)\"\n            - \"--v=0\"\n            - \"--timeout=150s\"\n            - \"--retry-interval-start=500ms\"\n            - \"--leader-election-namespace=$(DEPLOYMENT_NAMESPACE)\"\n          volumeMounts:\n            - name: socket-dir\n              mountPath: /csi\n        - name: ceph-rbd-snapshotter\n{{ tuple $envAll \"csi_snapshotter\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.rbd_snapshotter | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"provisioner\" \"container\" \"ceph_rbd_snapshotter\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          args:\n            - \"--csi-address=$(ADDRESS)\"\n            - \"--v=0\"\n            - \"--timeout=150s\"\n            - \"--leader-election=true\"\n            - \"--leader-election-namespace=$(DEPLOYMENT_NAMESPACE)\"\n          env:\n            - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: ADDRESS\n              value: unix:///csi/csi-provisioner.sock\n          volumeMounts:\n           
 - name: socket-dir\n              mountPath: /csi\n        - name: ceph-rbd-attacher\n{{ tuple $envAll \"csi_attacher\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.rbd_attacher | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"provisioner\" \"container\" \"ceph_rbd_attacher\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          args:\n            - \"--v=0\"\n            - \"--csi-address=$(ADDRESS)\"\n            - \"--leader-election=true\"\n            - \"--retry-interval-start=500ms\"\n            - \"--leader-election-namespace=$(DEPLOYMENT_NAMESPACE)\"\n          env:\n            - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: ADDRESS\n              value: /csi/csi-provisioner.sock\n          volumeMounts:\n            - name: socket-dir\n              mountPath: /csi\n        - name: csi-resizer\n{{ tuple $envAll \"csi_resizer\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.rbd_resizer | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"provisioner\" \"container\" \"ceph_rbd_resizer\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          args:\n            - \"--csi-address=$(ADDRESS)\"\n            - \"--v=0\"\n            - \"--leader-election\"\n            - \"--leader-election-namespace=$(DEPLOYMENT_NAMESPACE)\"\n          env:\n            - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: ADDRESS\n              value: unix:///csi/csi-provisioner.sock\n          volumeMounts:\n            - name: socket-dir\n  
            mountPath: /csi\n        - name: csi-rbdplugin\n{{ tuple $envAll \"cephcsi\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.rbd_cephcsi | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"provisioner\" \"container\" \"ceph_rbd_cephcsi\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          args:\n            - \"--nodeid=$(NODE_ID)\"\n            - \"--type=rbd\"\n            - \"--controllerserver=true\"\n            - \"--endpoint=$(CSI_ENDPOINT)\"\n            - \"--v=0\"\n            - \"--drivername={{ $envAll.Values.storageclass.csi_rbd.provisioner }}\"\n            - \"--pidlimit=-1\"\n          env:\n            - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: POD_IP\n              valueFrom:\n                fieldRef:\n                  fieldPath: status.podIP\n            - name: NODE_ID\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            - name: CSI_ENDPOINT\n              value: unix:///csi/csi-provisioner.sock\n          volumeMounts:\n            - name: socket-dir\n              mountPath: /csi\n            - mountPath: /dev\n              name: host-dev\n            - mountPath: /sys\n              name: host-sys\n            - mountPath: /lib/modules\n              name: lib-modules\n              readOnly: true\n            - name: ceph-csi-config\n              mountPath: /etc/ceph-csi-config/\n            - name: keys-tmp-dir\n              mountPath: /tmp/csi/keys\n      volumes:\n        - name: host-dev\n          hostPath:\n            path: /dev\n        - name: host-sys\n          hostPath:\n            path: /sys\n        - name: lib-modules\n          hostPath:\n            path: /lib/modules\n        
- name: socket-dir\n          emptyDir: {\n            medium: \"Memory\"\n          }\n        - name: ceph-csi-config\n          configMap:\n            name: ceph-csi-config\n        - name: keys-tmp-dir\n          emptyDir: {\n            medium: \"Memory\"\n          }\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ceph-client-bootstrap\" }}\n{{ tuple $envAll \"bootstrap\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: ceph-client-bootstrap\n  labels:\n{{ tuple $envAll \"ceph\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"bootstrap\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: ceph-client-bootstrap\n{{ tuple $envAll \"ceph_bootstrap\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll 
$envAll.Values.pod.resources.jobs.bootstrap | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" \"container\" \"ceph_client_bootstrap\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/bootstrap.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-provisioners-bin\n              mountPath: /tmp/bootstrap.sh\n              subPath: bootstrap.sh\n              readOnly: true\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-client-admin-keyring\n              mountPath: /etc/ceph/ceph.client.admin.keyring\n              subPath: ceph.client.admin.keyring\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-provisioners-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"ceph-prov-bin\" | quote }}\n            defaultMode: 0555\n        - name: ceph-etc\n          configMap:\n{{- if eq .Values.storageclass.csi_rbd.provision_storage_class true }}\n            name: {{ .Values.storageclass.csi_rbd.ceph_configmap_name }}\n{{- else }}\n            name: {{ .Values.storageclass.rbd.ceph_configmap_name }}\n{{- end }}\n            defaultMode: 0444\n        - name: ceph-client-admin-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin }}\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/job-cephfs-client-key.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_cephfs_client_key .Values.deployment.cephfs_provisioner }}\n{{- $envAll := . }}\n\n{{- $randStringSuffix := randAlphaNum 5 | lower }}\n\n{{- $serviceAccountName := \"ceph-cephfs-client-key-generator\" }}\n{{ tuple $envAll \"cephfs_client_key_generator\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceAccountName $randStringSuffix }}\n  namespace: {{ .Values.storageclass.cephfs.parameters.adminSecretNamespace }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceAccountName 
$randStringSuffix }}\n  namespace: {{ .Values.storageclass.cephfs.parameters.adminSecretNamespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ printf \"%s-%s\" $serviceAccountName $randStringSuffix }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: ceph-cephfs-client-key-generator\n  labels:\n{{ tuple $envAll \"ceph\" \"cephfs-client-key-generator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"cephfs-client-key-generator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-cephfs-client-key-generator\" \"containerNames\" (list \"ceph-storage-keys-generator\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"cephfs_client_key_generator\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"cephfs_client_key_generator\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: ceph-storage-keys-generator\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.secret_provisioning | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict 
\"envAll\" $envAll \"application\" \"cephfs_client_key_generator\" \"container\" \"ceph_storage_keys_generator\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: PVC_CEPH_CEPHFS_STORAGECLASS_USER_SECRET_NAME\n              value: {{ .Values.storageclass.cephfs.parameters.adminSecretName }}\n            - name: PVC_CEPH_CEPHFS_STORAGECLASS_ADMIN_SECRET_NAME\n              value: {{ .Values.secrets.keyrings.prov_adminSecretName }}\n            - name: PVC_CEPH_CEPHFS_STORAGECLASS_DEPLOYED_NAMESPACE\n              value: {{ .Values.storageclass.cephfs.parameters.adminSecretNamespace }}\n          command:\n            - /tmp/provisioner-cephfs-client-key-manager.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-provisioners-bin\n              mountPath: /tmp/provisioner-cephfs-client-key-manager.sh\n              subPath: provisioner-cephfs-client-key-manager.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-provisioners-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"ceph-prov-bin\" | quote }}\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"ceph-provisioners\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/job-namespace-client-ceph-config.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_namespace_client_ceph_config .Values.deployment.client_secrets }}\n{{- $envAll := . }}\n\n{{- $randStringSuffix := randAlphaNum 5 | lower }}\n\n{{- $serviceAccountName := print $envAll.Release.Name \"-ceph-ns-ceph-config-generator\" }}\n{{ tuple $envAll \"namespace_client_ceph_config_generator\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceAccountName $randStringSuffix }}\n{{- if eq .Values.storageclass.csi_rbd.provision_storage_class true }}\n  namespace: {{ .Values.storageclass.csi_rbd.parameters.adminSecretNamespace }}\n{{- else }}\n  namespace: {{ .Values.storageclass.rbd.parameters.adminSecretNamespace }}\n{{- end }}\nrules:\n  - apiGroups:\n     
 - \"\"\n    resources:\n      - endpoints\n    verbs:\n      - get\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceAccountName $randStringSuffix }}\n{{- if eq .Values.storageclass.csi_rbd.provision_storage_class true }}\n  namespace: {{ .Values.storageclass.csi_rbd.parameters.adminSecretNamespace }}\n{{- else }}\n  namespace: {{ .Values.storageclass.rbd.parameters.adminSecretNamespace }}\n{{- end }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ printf \"%s-%s\" $serviceAccountName $randStringSuffix }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ $serviceAccountName }}\n  labels:\n{{ tuple $envAll \"ceph\" \"client-ceph-config-generator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"client-ceph-config-generator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" $serviceAccountName \"containerNames\" (list \"ceph-storage-keys-generator\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"client_ceph_config_generator\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"namespace_client_ceph_config_generator\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - 
name:  ceph-storage-keys-generator\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.secret_provisioning | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"client_ceph_config_generator\" \"container\" \"ceph_storage_keys_generator\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: CEPH_CONF_ETC\n{{- if eq .Values.storageclass.csi_rbd.provision_storage_class true }}\n              value: {{ .Values.storageclass.csi_rbd.ceph_configmap_name }}\n{{- else }}\n              value: {{ .Values.storageclass.rbd.ceph_configmap_name }}\n{{- end }}\n            - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: PVC_CEPH_RBD_STORAGECLASS_DEPLOYED_NAMESPACE\n{{- if eq .Values.storageclass.csi_rbd.provision_storage_class true }}\n              value: {{ .Values.storageclass.csi_rbd.parameters.adminSecretNamespace }}\n{{- else }}\n              value: {{ .Values.storageclass.rbd.parameters.adminSecretNamespace }}\n{{- end }}\n            - name: MON_PORT\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: MON_PORT_V2\n              value: {{ tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n\n          command:\n            - /tmp/provisioner-rbd-namespace-client-ceph-config-manager.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-provisioners-bin-clients\n              mountPath: 
/tmp/provisioner-rbd-namespace-client-ceph-config-manager.sh\n              subPath: provisioner-rbd-namespace-client-ceph-config-manager.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-provisioners-bin-clients\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"ceph-prov-bin-clients\" | quote }}\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/job-namespace-client-key-cleaner.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_namespace_client_key_cleaner .Values.deployment.client_secrets }}\n{{- $envAll := . }}\n\n{{- $randStringSuffix := randAlphaNum 5 | lower }}\n\n{{- $serviceAccountName := print $envAll.Release.Name \"-ceph-ns-key-cleaner\" }}\n{{ tuple $envAll \"namespace_client_key_cleaner\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - list\n      - delete\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ $serviceAccountName }}\n  labels:\n{{ tuple $envAll \"ceph\" \"client-key-cleaner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": pre-delete\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"client-key-cleaner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n{{ 
dict \"envAll\" $envAll \"application\" \"client_key_cleaner\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"namespace_client_key_cleaner\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name:  ceph-namespace-client-keys-cleaner\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.secret_provisioning | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"client_key_cleaner\" \"container\" \"ceph_namespace_client_keys_cleaner\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: PVC_CEPH_RBD_STORAGECLASS_USER_SECRET_NAME\n{{- if eq .Values.storageclass.csi_rbd.provision_storage_class true }}\n              value: {{ .Values.storageclass.csi_rbd.parameters.userSecretName }}\n{{- else }}\n              value: {{ .Values.storageclass.rbd.parameters.userSecretName }}\n{{- end }}\n          command:\n            - /tmp/provisioner-rbd-namespace-client-key-cleaner.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-provisioners-bin-clients\n              mountPath: /tmp/provisioner-rbd-namespace-client-key-cleaner.sh\n              subPath: provisioner-rbd-namespace-client-key-cleaner.sh\n              
readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-provisioners-bin-clients\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"ceph-prov-bin-clients\" | quote }}\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/job-namespace-client-key.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_namespace_client_key .Values.deployment.client_secrets }}\n{{- $envAll := . }}\n\n{{- $randStringSuffix := randAlphaNum 5 | lower }}\n\n{{- $serviceAccountName := print $envAll.Release.Name \"-ceph-ns-key-generator\" }}\n{{ tuple $envAll \"namespace_client_key_generator\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceAccountName $randStringSuffix }}\n{{- if eq .Values.storageclass.csi_rbd.provision_storage_class true }}\n  namespace: {{ .Values.storageclass.csi_rbd.parameters.adminSecretNamespace }}\n{{- else }}\n  namespace: {{ .Values.storageclass.rbd.parameters.adminSecretNamespace }}\n{{- end }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n  
    - secrets\n    verbs:\n      - get\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceAccountName $randStringSuffix }}\n{{- if eq .Values.storageclass.csi_rbd.provision_storage_class true }}\n  namespace: {{ .Values.storageclass.csi_rbd.parameters.adminSecretNamespace }}\n{{- else }}\n  namespace: {{ .Values.storageclass.rbd.parameters.adminSecretNamespace }}\n{{- end }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ printf \"%s-%s\" $serviceAccountName $randStringSuffix }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ $serviceAccountName }}\n  labels:\n{{ tuple $envAll \"ceph\" \"client-key-generator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"client-key-generator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" $serviceAccountName \"containerNames\" (list \"ceph-storage-keys-generator\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"client_key_generator\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"namespace_client_key_generator\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name:  ceph-storage-keys-generator\n{{ tuple $envAll 
\"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.secret_provisioning | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"client_key_generator\" \"container\" \"ceph_storage_keys_generator\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: DEPLOYMENT_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n{{- if eq .Values.ext_ceph_cluster.rook_ceph.connect true }}\n            - name: CONNECT_TO_ROOK_CEPH_CLUSTER\n              value: \"true\"\n{{- end }}\n{{- if eq .Values.storageclass.csi_rbd.provision_storage_class true }}\n            - name: PVC_CEPH_RBD_STORAGECLASS_USER_SECRET_NAME\n              value: {{ .Values.storageclass.csi_rbd.parameters.userSecretName }}\n            - name: PVC_CEPH_RBD_STORAGECLASS_ADMIN_SECRET_NAME\n              value: {{ .Values.storageclass.csi_rbd.parameters.adminSecretName }}\n            - name: PVC_CEPH_RBD_STORAGECLASS_DEPLOYED_NAMESPACE\n              value: {{ .Values.storageclass.csi_rbd.parameters.adminSecretNamespace }}\n{{- else }}\n            - name: PVC_CEPH_RBD_STORAGECLASS_USER_SECRET_NAME\n              value: {{ .Values.storageclass.rbd.parameters.userSecretName }}\n            - name: PVC_CEPH_RBD_STORAGECLASS_ADMIN_SECRET_NAME\n              value: {{ .Values.storageclass.rbd.parameters.adminSecretName }}\n            - name: PVC_CEPH_RBD_STORAGECLASS_DEPLOYED_NAMESPACE\n              value: {{ .Values.storageclass.rbd.parameters.adminSecretNamespace }}\n{{- end }}\n          command:\n            - /tmp/provisioner-rbd-namespace-client-key-manager.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: 
ceph-provisioners-bin-clients\n              mountPath: /tmp/provisioner-rbd-namespace-client-key-manager.sh\n              subPath: provisioner-rbd-namespace-client-key-manager.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-provisioners-bin-clients\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"ceph-prov-bin-clients\" | quote }}\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/pod-helm-tests.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.deployment.client_secrets .Values.manifests.helm_tests }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := printf \"%s-%s\" $envAll.Release.Name \"test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - ''\n    resources:\n      - persistentvolumes\n      - persistentvolumeclaims\n      - events\n      - pods\n      - pods/log\n      - configmaps\n    verbs:\n      - create\n      - get\n      - delete\n      - list\n  - apiGroups:\n      - storage.k8s.io\n    resources:\n      - storageclasses\n    verbs:\n      - get\n      - list\n      - watch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{.Release.Name}}-test\"\n  labels:\n{{ tuple $envAll \"ceph\" \"provisioner-test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n{{ dict \"envAll\" 
$envAll \"podName\" $serviceAccountName \"containerNames\" (list \"init\" \"ceph-provisioner-helm-test\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n{{ dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  restartPolicy: Never\n  serviceAccountName: {{ $serviceAccountName }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  initContainers:\n{{ tuple $envAll \"tests\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: ceph-provisioner-helm-test\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      env:\n        - name: PVC_NAMESPACE\n          value: {{ .Release.Namespace }}\n        - name: RBD_TEST_POD_NAME\n          value: {{ .Values.pod.test_pod.rbd.name }}\n        - name: RBD_TEST_PVC_NAME\n          value: {{ .Values.pod.test_pod.rbd.pvc_name }}\n        - name: CSI_RBD_TEST_POD_NAME\n          value: {{ .Values.pod.test_pod.csi_rbd.name }}\n        - name: CSI_RBD_TEST_PVC_NAME\n          value: {{ .Values.pod.test_pod.csi_rbd.pvc_name }}\n        - name: CEPHFS_TEST_POD_NAME\n          value: {{ .Values.pod.test_pod.cephfs.name }}\n        - name: CEPHFS_TEST_PVC_NAME\n          value: {{ .Values.pod.test_pod.cephfs.pvc_name }}\n        - name: TEST_POD_WAIT_TIMEOUT\n          value: {{ .Values.pod.test_pod.wait_timeout | quote }}\n      command:\n        - /tmp/helm-tests.sh\n      volumeMounts:\n        - name: ceph-provisioners-bin-clients\n     
     mountPath: /tmp/helm-tests.sh\n          subPath: helm-tests.sh\n          readOnly: true\n        - name: pod-tmp\n          mountPath: /tmp\n  volumes:\n    - name: ceph-provisioners-bin-clients\n      configMap:\n        name: {{ printf \"%s-%s\" $envAll.Release.Name \"ceph-prov-bin-clients\" | quote }}\n        defaultMode: 0555\n    - name: pod-tmp\n      emptyDir: {}\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/templates/storageclass.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.storageclass (.Values.deployment.ceph) }}\n{{- range $storageclass, $val := .Values.storageclass }}\n{{ dict \"storageclass_data\" $val \"envAll\" $ | include \"helm-toolkit.manifests.ceph-storageclass\" }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ceph-provisioners/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for ceph-client.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\ndeployment:\n  ceph: true\n  client_secrets: false\n  csi_rbd_provisioner: true\n  # Original rbd_provisioner and cephfs_provisioner are now DEPRECATED. They\n  # will be removed in the next release; CSI provisioners should be used\n  # instead.\n\nrelease_group: null\n\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    ceph_bootstrap: 'quay.io/airshipit/ceph-daemon:ubuntu_jammy_20.2.1-1-20260407'\n    ceph_config_helper: 'quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407'\n    csi_provisioner: 'registry.k8s.io/sig-storage/csi-provisioner:v4.0.1'\n    csi_snapshotter: 'registry.k8s.io/sig-storage/csi-snapshotter:v7.0.2'\n    csi_attacher: 'registry.k8s.io/sig-storage/csi-attacher:v4.5.1'\n    csi_resizer: 'registry.k8s.io/sig-storage/csi-resizer:v1.10.1'\n    csi_registrar: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1'\n    cephcsi: 'quay.io/cephcsi/cephcsi:v3.11.0'\n    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy'\n    image_repo_sync: 'quay.io/airshipit/docker:27.5.0'\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    
node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  provisioner:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  csi_rbd_plugin:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\npod:\n  test_pod:\n    wait_timeout: 600\n    rbd:\n      name: rbd-prov-test-pod\n      pvc_name: rbd-prov-test-pvc\n    csi_rbd:\n      name: csi-rbd-prov-test-pod\n      pvc_name: csi-rbd-prov-test-pvc\n    cephfs:\n      name: cephfs-prov-test-pod\n      pvc_name: cephfs-prov-test-pvc\n  security_context:\n    provisioner:\n      pod:\n        runAsUser: 0\n      container:\n        ceph_cephfs_provisioner:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        ceph_rbd_provisioner:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        ceph_rbd_snapshotter:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        ceph_rbd_attacher:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        ceph_rbd_resizer:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        ceph_rbd_cephcsi:\n          privileged: true\n          capabilities:\n            add: [\"SYS_ADMIN\"]\n    plugin:\n      pod:\n        runAsUser: 0\n      container:\n        ceph_rbd_registrar:\n          privileged: true\n          capabilities:\n            add: [\"SYS_ADMIN\"]\n        ceph_csi_rbd_plugin:\n          privileged: true\n          capabilities:\n            add: [\"SYS_ADMIN\"]\n          allowPrivilegeEscalation: true\n    bootstrap:\n      pod:\n        runAsUser: 99\n      container:\n        ceph_client_bootstrap:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    cephfs_client_key_generator:\n      pod:\n        runAsUser: 99\n      container:\n        ceph_storage_keys_generator:\n          
allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    client_key_cleaner:\n      pod:\n        runAsUser: 99\n      container:\n        ceph_namespace_client_keys_cleaner:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    client_key_generator:\n      pod:\n        runAsUser: 99\n      container:\n        ceph_storage_keys_generator:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    test:\n      pod:\n        runAsUser: 0\n      container:\n        test:\n          readOnlyRootFilesystem: true\n  dns_policy: \"ClusterFirstWithHostNet\"\n  replicas:\n    csi_rbd_provisioner: 2\n  lifecycle:\n    upgrades:\n      deployments:\n        pod_replacement_strategy: Recreate\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        plugin:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  resources:\n    enabled: false\n    rbd_provisioner:\n      requests:\n        memory: \"5Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"50Mi\"\n        cpu: \"500m\"\n    csi_rbd_provisioner:\n      requests:\n        memory: \"5Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"50Mi\"\n        cpu: \"500m\"\n    cephfs_provisioner:\n      requests:\n        memory: \"5Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"50Mi\"\n        cpu: \"500m\"\n    rbd_attacher:\n      requests:\n        memory: \"5Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"50Mi\"\n        cpu: \"500m\"\n    rbd_registrar:\n      requests:\n        memory: \"5Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"50Mi\"\n        cpu: \"500m\"\n    rbd_resizer:\n      requests:\n        memory: \"5Mi\"\n  
      cpu: \"250m\"\n      limits:\n        memory: \"50Mi\"\n        cpu: \"500m\"\n    rbd_snapshotter:\n      requests:\n        memory: \"5Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"50Mi\"\n        cpu: \"500m\"\n    rbd_cephcsi:\n      requests:\n        memory: \"5Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"50Mi\"\n        cpu: \"500m\"\n    jobs:\n      bootstrap:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"500m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  tolerations:\n    rbd_provisioner:\n      tolerations:\n      - effect: NoExecute\n        key: node.kubernetes.io/not-ready\n        operator: Exists\n        tolerationSeconds: 60\n      - effect: NoExecute\n        key: node.kubernetes.io/unreachable\n        operator: Exists\n        tolerationSeconds: 60\n    csi_rbd_provisioner:\n      tolerations:\n      - effect: NoExecute\n        key: node.kubernetes.io/not-ready\n        operator: Exists\n        tolerationSeconds: 60\n      - effect: NoExecute\n        key: node.kubernetes.io/unreachable\n        operator: Exists\n        tolerationSeconds: 60\n    cephfs_provisioner:\n      tolerations:\n      - effect: NoExecute\n        key: node.kubernetes.io/not-ready\n        operator: Exists\n        tolerationSeconds: 60\n      - effect: NoExecute\n        key: node.kubernetes.io/unreachable\n        operator: Exists\n        tolerationSeconds: 60\n\nsecrets:\n  keyrings:\n    admin: ceph-client-admin-keyring\n    prov_adminSecretName: pvc-ceph-conf-combined-storageclass\n  oci_image_registry:\n    ceph-provisioners: ceph-provisioners-oci-image-registry-key\n\nnetwork:\n  public: 192.168.0.0/16\n  cluster: 192.168.0.0/16\n\nconf:\n  ceph:\n    global:\n      # auth\n      cephx: true\n      
cephx_require_signatures: false\n      cephx_cluster_require_signatures: true\n      cephx_service_require_signatures: false\n      objecter_inflight_op_bytes: \"1073741824\"\n      objecter_inflight_ops: 10240\n      debug_ms: \"0/0\"\n      log_file: /dev/stdout\n      mon_cluster_log_file: /dev/stdout\n    osd:\n      osd_mkfs_type: xfs\n      osd_mkfs_options_xfs: -f -i size=2048\n      osd_max_object_name_len: 256\n      ms_bind_port_min: 6800\n      ms_bind_port_max: 7100\n\next_ceph_cluster:\n  rook_ceph:\n    connect: false\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - ceph-provisioners-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    bootstrap:\n      jobs: null\n      services:\n        - endpoint: internal\n          service: ceph_mon\n    cephfs_client_key_generator:\n      jobs: null\n    cephfs_provisioner:\n      jobs:\n        - ceph-rbd-pool\n      services:\n        - endpoint: internal\n          service: ceph_mon\n    namespace_client_key_cleaner:\n      jobs: null\n    namespace_client_key_generator:\n      jobs: null\n    rbd_provisioner:\n      jobs:\n        - ceph-rbd-pool\n      services:\n        - endpoint: internal\n          service: ceph_mon\n    csi_rbd_provisioner:\n      jobs:\n        - ceph-rbd-pool\n      services:\n        - endpoint: internal\n          service: ceph_mon\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nbootstrap:\n  enabled: false\n  script: |\n    ceph -s\n    function ensure_pool () {\n      ceph osd pool stats $1 || ceph osd pool create $1 $2\n      if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. 
-f1) -ge 12 ]]; then\n        ceph osd pool application enable $1 $3\n      fi\n    }\n    #ensure_pool volumes 8 cinder\n\n# if you change provision_storage_class to false\n# it is presumed you manage your own storage\n# class definition externally\n# NOTE(kranthikirang) We iterate over each storageclass parameters\n# and derive the manifest.\nstorageclass:\n  rbd:\n    provision_storage_class: false\n    provisioner: ceph.com/rbd\n    ceph_configmap_name: ceph-etc\n    metadata:\n      name: general-rbd\n    parameters:\n      pool: rbd\n      adminId: admin\n      adminSecretName: pvc-ceph-conf-combined-storageclass\n      adminSecretNamespace: ceph\n      userId: admin\n      userSecretName: pvc-ceph-client-key\n      imageFormat: \"2\"\n      imageFeatures: layering\n  csi_rbd:\n    provision_storage_class: true\n    provisioner: ceph.rbd.csi.ceph.com\n    ceph_configmap_name: ceph-etc\n    metadata:\n      default_storage_class: true\n      name: general\n    parameters:\n      clusterID: ceph\n      csi.storage.k8s.io/controller-expand-secret-name: pvc-ceph-conf-combined-storageclass\n      csi.storage.k8s.io/controller-expand-secret-namespace: ceph\n      csi.storage.k8s.io/fstype: ext4\n      csi.storage.k8s.io/node-stage-secret-name: pvc-ceph-conf-combined-storageclass\n      csi.storage.k8s.io/node-stage-secret-namespace: ceph\n      csi.storage.k8s.io/provisioner-secret-name: pvc-ceph-conf-combined-storageclass\n      csi.storage.k8s.io/provisioner-secret-namespace: ceph\n      imageFeatures: layering\n      imageFormat: \"2\"\n      pool: rbd\n      adminId: admin\n      adminSecretName: pvc-ceph-conf-combined-storageclass\n      adminSecretNamespace: ceph\n      userId: admin\n      userSecretName: pvc-ceph-client-key\n  cephfs:\n    provision_storage_class: false\n    provisioner: ceph.com/cephfs\n    metadata:\n      name: cephfs\n    parameters:\n      adminId: admin\n      adminSecretName: pvc-ceph-cephfs-client-key\n      adminSecretNamespace: 
ceph\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      ceph-provisioners:\n        username: ceph-provisioners\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  ceph_mon:\n    namespace: null\n    hosts:\n      default: ceph-mon\n      discovery: ceph-mon-discovery\n    host_fqdn_override:\n      default: null\n    port:\n      mon:\n        default: 6789\n      mon_msgr2:\n        default: 3300\n\n\nmanifests:\n  configmap_bin: true\n  configmap_bin_common: true\n  configmap_etc: true\n  # Original rbd_provisioner is now DEPRECATED. 
It will be removed in the\n  # next release; CSI RBD provisioner should be used instead.\n  deployment_csi_rbd_provisioner: true\n  job_bootstrap: false\n  job_cephfs_client_key: true\n  job_image_repo_sync: true\n  job_namespace_client_key_cleaner: true\n  job_namespace_client_key: true\n  job_namespace_client_ceph_config: true\n  storageclass: true\n  helm_tests: true\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "ceph-rgw/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Ceph RadosGW\nname: ceph-rgw\nversion: 2025.2.0\nhome: https://github.com/ceph/ceph\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "ceph-rgw/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "ceph-rgw/templates/bin/_ceph-admin-keyring.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncat <<EOF > /etc/ceph/ceph.client.admin.keyring\n[client.admin]\n    key = $(cat /tmp/client-keyring)\nEOF\n\nexit 0\n"
  },
  {
    "path": "ceph-rgw/templates/bin/_ceph-rgw-storage-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\nif [ \"x$STORAGE_BACKEND\" == \"xceph-rgw\" ]; then\n  SECRET=$(mktemp --suffix .yaml)\n  KEYRING=$(mktemp --suffix .keyring)\n  function cleanup {\n      rm -f ${SECRET} ${KEYRING}\n  }\n  trap cleanup EXIT\nfi\n\nfunction kube_ceph_keyring_gen () {\n  CEPH_KEY=$1\n  CEPH_KEY_TEMPLATE=$2\n  sed \"s|{{\"{{\"}} key {{\"}}\"}}|${CEPH_KEY}|\" /tmp/ceph-templates/${CEPH_KEY_TEMPLATE} | base64 -w0 | tr -d '\\n'\n}\n\nset -ex\nif [ \"x$STORAGE_BACKEND\" == \"xceph-rgw\" ]; then\n  ceph -s\n  if USERINFO=$(ceph auth get client.bootstrap-rgw); then\n    KEYSTR=$(echo $USERINFO | sed 's/.*\\( key = .*\\) caps mon.*/\\1/')\n    echo $KEYSTR  > ${KEYRING}\n  else\n    #NOTE(Portdirect): Determine proper privs to assign keyring\n    ceph auth get-or-create client.bootstrap-rgw \\\n      mon \"allow profile bootstrap-rgw\" \\\n      -o ${KEYRING}\n  fi\n  FINAL_KEYRING=$(sed -n 's/^[[:blank:]]*key[[:blank:]]\\+=[[:blank:]]\\(.*\\)/\\1/p' ${KEYRING})\n  cat > ${SECRET} <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: \"os-ceph-bootstrap-rgw-keyring\"\ntype: Opaque\ndata:\n ceph.keyring: $( kube_ceph_keyring_gen ${FINAL_KEYRING} \"bootstrap.keyring.rgw\"  )\nEOF\n  kubectl apply --namespace ${NAMESPACE} -f ${SECRET}\n\nfi\n"
  },
  {
    "path": "ceph-rgw/templates/bin/_create-rgw-placement-targets.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -e\n\nfunction create_rgw_placement_target () {\n  echo \"Creating rgw placement target $2\"\n  radosgw-admin zonegroup placement add \\\n    --rgw-zonegroup \"$1\" \\\n    --placement-id \"$2\"\n}\n\nfunction delete_rgw_placement_target () {\n  echo \"Deleting rgw placement target $1\"\n  radosgw-admin zonegroup placement rm --rgw-zonegroup \"$1\" --placement-id \"$2\"\n}\n\nfunction add_rgw_zone_placement () {\n  echo \"Adding rgw zone placement for placement target $2 data pool $3\"\n  radosgw-admin zone placement add \\\n    --rgw-zone \"$1\" \\\n    --placement-id \"$2\" \\\n    --data-pool \"$3\" \\\n    --index-pool \"$4\" \\\n    --data-extra-pool \"$5\"\n}\n\nfunction rm_rgw_zone_placement () {\n  echo \"Removing rgw zone placement for placement target $1\"\n  radosgw-admin zone placement rm --rgw-zone \"$1\" --placement-id \"$2\"\n}\n\n{{- range $i, $placement_target := .Values.conf.rgw_placement_targets }}\nRGW_PLACEMENT_TARGET={{ $placement_target.name | quote }}\nRGW_PLACEMENT_TARGET_DATA_POOL={{ $placement_target.data_pool | quote }}\nRGW_PLACEMENT_TARGET_INDEX_POOL={{ $placement_target.index_pool | default \"default.rgw.buckets.index\" | quote }}\nRGW_PLACEMENT_TARGET_DATA_EXTRA_POOL={{ $placement_target.data_extra_pool | default \"default.rgw.buckets.non-ec\" | quote }}\nRGW_ZONEGROUP={{ $placement_target.zonegroup | default \"default\" | quote }}\nRGW_ZONE={{ 
$placement_target.zone | default \"default\" | quote }}\nRGW_DELETE_PLACEMENT_TARGET={{ $placement_target.delete | default \"false\" | quote }}\nRGW_PLACEMENT_TARGET_EXISTS=$(radosgw-admin zonegroup placement get --placement-id \"$RGW_PLACEMENT_TARGET\" 2>/dev/null || true)\nif [[ -z \"$RGW_PLACEMENT_TARGET_EXISTS\" ]]; then\n  create_rgw_placement_target \"$RGW_ZONEGROUP\" \"$RGW_PLACEMENT_TARGET\"\n  add_rgw_zone_placement \"$RGW_ZONE\" \"$RGW_PLACEMENT_TARGET\" \"$RGW_PLACEMENT_TARGET_DATA_POOL\" \"$RGW_PLACEMENT_TARGET_INDEX_POOL\" \"$RGW_PLACEMENT_TARGET_DATA_EXTRA_POOL\"\n  RGW_PLACEMENT_TARGET_EXISTS=$(radosgw-admin zonegroup placement get --placement-id \"$RGW_PLACEMENT_TARGET\" 2>/dev/null || true)\nfi\nif [[ -n \"$RGW_PLACEMENT_TARGET_EXISTS\" ]] &&\n   [[ \"true\" == \"$RGW_DELETE_PLACEMENT_TARGET\" ]]; then\n  rm_rgw_zone_placement \"$RGW_ZONE\" \"$RGW_PLACEMENT_TARGET\"\n  delete_rgw_placement_target \"$RGW_ZONEGROUP\" \"$RGW_PLACEMENT_TARGET\"\nfi\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/bin/_helm-tests.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\ntmpdir=$(mktemp -d)\ndeclare -a objects_list\ntotal_objects=10\n\n#NOTE: This function tests keystone based auth. It uses ceph_config_helper\n#container image that has openstack and ceph installed\nfunction rgw_keystone_bucket_validation ()\n{\n  echo \"function: rgw_keystone_bucket_validation\"\n  openstack service list\n\n  bucket_stat=\"$(openstack container list | grep openstack_test_container || true)\"\n  if [[ -n \"${bucket_stat}\" ]]; then\n    echo \"--> deleting openstack_test_container container\"\n    openstack container delete --recursive openstack_test_container\n  fi\n\n  echo \"--> creating openstack_test_container container\"\n  openstack container create 'openstack_test_container'\n\n  echo \"--> list containers\"\n  openstack container list\n\n  bucket_stat=\"$(openstack container list | grep openstack_test_container || true)\"\n  if [[ -z \"${bucket_stat}\" ]]; then\n    echo \"--> container openstack_test_container not found\"\n    exit 1\n  else\n    echo \"--> container openstack_test_container found\"\n\n    for i in $(seq $total_objects); do\n      openstack object create --name \"${objects_list[$i]}\" openstack_test_container \"${objects_list[$i]}\"\n      echo \"--> file ${objects_list[$i]} uploaded to openstack_test_container container\"\n    done\n\n    echo \"--> list contents of openstack_test_container container\"\n    openstack object list 
openstack_test_container\n\n    for i in $(seq $total_objects); do\n      echo \"--> downloading ${objects_list[$i]} object from openstack_test_container container to ${objects_list[$i]}_object${i} file\"\n      openstack object save --file \"${objects_list[$i]}_object${i}\" openstack_test_container \"${objects_list[$i]}\"\n      check_result $? \"Error during openstack CLI execution\" \"The object downloaded successfully\"\n\n      echo \"--> comparing files: ${objects_list[$i]} and ${objects_list[$i]}_object${i}\"\n      cmp \"${objects_list[$i]}\" \"${objects_list[$i]}_object${i}\"\n      check_result $? \"The files are not equal\" \"The files are equal\"\n\n      echo \"--> deleting ${objects_list[$i]} object from openstack_test_container container\"\n      openstack object delete openstack_test_container \"${objects_list[$i]}\"\n      check_result $? \"Error during openstack CLI execution\" \"The object deleted successfully\"\n    done\n\n    echo \"--> deleting openstack_test_container container\"\n    openstack container delete --recursive openstack_test_container\n\n    echo \"--> bucket list after deleting container\"\n    openstack container list\n  fi\n}\n\n#NOTE: This function tests s3 based auto. It uses ceph_rgw container image which has\n# s3cmd util install\nfunction rgw_s3_bucket_validation ()\n{\n  echo \"function: rgw_s3_bucket_validation\"\n\n  bucket=s3://rgw-test-bucket\n{{- if .Values.manifests.certificates }}\n  params=\"--host=$RGW_HOST --host-bucket=$RGW_HOST --access_key=$S3_ADMIN_ACCESS_KEY --secret_key=$S3_ADMIN_SECRET_KEY --ca-certs=/etc/tls/ca.crt\"\n{{- else }}\n  params=\"--host=$RGW_HOST --host-bucket=$RGW_HOST --access_key=$S3_ADMIN_ACCESS_KEY --secret_key=$S3_ADMIN_SECRET_KEY --no-ssl\"\n{{- end }}\n\n  bucket_stat=\"$(s3cmd ls $params | grep ${bucket} || true)\"\n  if [[ -n \"${bucket_stat}\" ]]; then\n    s3cmd del --recursive --force $bucket $params\n    check_result $? 
\"Error during s3cmd execution\" \"Bucket is deleted\"\n  fi\n\n  s3cmd mb $bucket $params\n  if [ $? -eq 0 ]; then\n    echo \"Bucket $bucket created\"\n\n    for i in $(seq $total_objects); do\n      s3cmd put \"${objects_list[$i]}\" $bucket $params\n      check_result $? \"Error during s3cmd execution\" \"File ${objects_list[$i]##*/} uploaded to bucket\"\n    done\n\n    s3cmd ls $bucket $params\n    check_result $? \"Error during s3cmd execution\" \"Got list of objects\"\n\n    for i in $(seq $total_objects); do\n      s3cmd get \"${bucket}/${objects_list[$i]##*/}\" -> \"${objects_list[$i]}_s3_object${i}\" $params\n      check_result $? \"Error during s3cmd execution\" \"File ${objects_list[$i]##*/} downloaded from bucket\"\n\n      echo \"Comparing files: ${objects_list[$i]} and ${objects_list[$i]}_s3_object${i}\"\n      cmp \"${objects_list[$i]}\" \"${objects_list[$i]}_s3_object${i}\"\n      check_result $? \"The files are not equal\" \"The files are equal\"\n\n      s3cmd del \"${bucket}/${objects_list[$i]##*/}\" $params\n      check_result $? \"Error during s3cmd execution\" \"File from bucket is deleted\"\n    done\n\n    s3cmd del --recursive --force $bucket $params\n    check_result $? \"Error during s3cmd execution\" \"Bucket is deleted\"\n\n  else\n    echo \"Error during s3cmd execution\"\n    exit 1\n  fi\n}\n\nfunction check_result ()\n{\n  red='\\033[0;31m'\n  green='\\033[0;32m'\n  bw='\\033[0m'\n  if [ \"$1\" -ne 0 ]; then\n    echo -e \"${red}$2${bw}\"\n    exit 1\n  else\n    echo -e \"${green}$3${bw}\"\n  fi\n}\n\nfunction prepare_objects ()\n{\n  echo \"Preparing ${total_objects} files for test\"\n  for i in $(seq $total_objects); do\n    objects_list[$i]=\"$(mktemp -p \"$tmpdir\")\"\n    echo \"${objects_list[$i]}\"\n    dd if=/dev/urandom of=\"${objects_list[$i]}\" bs=1M count=8\n  done\n}\n\nprepare_objects\n\nif [ \"$RGW_TEST_TYPE\" == RGW_KS ];\nthen\n  echo \"--> Keystone is enabled. 
Calling function to test keystone based auth \"\n  rgw_keystone_bucket_validation\nfi\n\nif [ \"$RGW_TEST_TYPE\" == RGW_S3 ];\nthen\n  echo \"--> S3 is enabled. Calling function to test S3 based auth \"\n  rgw_s3_bucket_validation\nfi\n"
  },
  {
    "path": "ceph-rgw/templates/bin/_init-dirs.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport LC_ALL=C\n: \"${HOSTNAME:=$(uname -n)}\"\n: \"${RGW_NAME:=${HOSTNAME}}\"\n: \"${RGW_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring}\"\n\nfor keyring in ${RGW_BOOTSTRAP_KEYRING}; do\n  mkdir -p \"$(dirname \"$keyring\")\"\ndone\n\n# Let's create the ceph directories\nfor DIRECTORY in radosgw tmp; do\n  mkdir -p \"/var/lib/ceph/${DIRECTORY}\"\ndone\n\n# Create socket directory\nmkdir -p /run/ceph\n\n# Creating rados directories\nmkdir -p \"/var/lib/ceph/radosgw/${RGW_NAME}\"\n\n# Clean the folder\nrm -f \"$(dirname \"${RGW_BOOTSTRAP_KEYRING}\"/*)\"\n\n# Adjust the owner of all those directories\nchown -R ceph. /run/ceph/ /var/lib/ceph/*\n"
  },
  {
    "path": "ceph-rgw/templates/bin/_rgw-restart.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nexport LC_ALL=C\nTIMEOUT=\"{{ .Values.conf.rgw_restart.timeout | default 600 }}s\"\n\nkubectl rollout restart deployment ceph-rgw\nkubectl rollout status --timeout=${TIMEOUT} deployment ceph-rgw\n\nif [ \"$?\" -ne 0 ]; then\n  echo \"Ceph rgw deployment was not able to restart in ${TIMEOUT}\"\nfi\n"
  },
  {
    "path": "ceph-rgw/templates/bin/rgw/_init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport LC_ALL=C\n\n: \"${CEPH_CONF:=\"/etc/ceph/${CLUSTER}.conf\"}\"\n: \"${EP:=ceph-mon-discovery}\"\n{{- if empty .Values.endpoints.ceph_mon.namespace -}}\nMON_NS=ceph\n{{ else }}\nMON_NS={{ .Values.endpoints.ceph_mon.namespace }}\n{{- end }}\n\n{{ include \"helm-toolkit.snippets.mon_host_from_k8s_ep\" . }}\n\nif [[ ! -e ${CEPH_CONF}.template ]]; then\n  echo \"ERROR- ${CEPH_CONF}.template must exist.\"\n  exit 1\nfi\n\nENDPOINT=$(mon_host_from_k8s_ep \"${MON_NS}\" \"${EP}\")\n\nif [[ -z \"${ENDPOINT}\" ]]; then\n   /bin/sh -c -e \"cat ${CEPH_CONF}.template | tee ${CEPH_CONF}\" || true\nelse\n   /bin/sh -c -e \"cat ${CEPH_CONF}.template | sed 's#mon_host.*#mon_host = ${ENDPOINT}#g' | tee ${CEPH_CONF}\" || true\nfi\n\ncat >> ${CEPH_CONF} <<EOF\n\n[client.rgw.$(hostname -s)]\n{{ range $key, $value := .Values.conf.rgw.config -}}\n{{- if kindIs \"slice\" $value -}}\n{{ $key }} = {{ include \"helm-toolkit.joinListWithComma\" $value | quote }}\n{{ else -}}\n{{ $key }} = {{ $value | quote  }}\n{{ end -}}\n{{- end -}}\n{{- if .Values.conf.rgw_ks.enabled }}\n{{- if .Values.manifests.certificates }}\nrgw_frontends = \"beast ssl_port=${RGW_FRONTEND_PORT} ssl_certificate=/etc/tls/tls.crt ssl_private_key=/etc/tls/tls.key\"\n{{- else }}\nrgw_frontends = \"beast port=${RGW_FRONTEND_PORT}\"\n{{- end }}\nrgw_keystone_url = \"${KEYSTONE_URL}\"\nrgw_keystone_admin_user = 
\"${OS_USERNAME}\"\nrgw_keystone_admin_password = \"${OS_PASSWORD}\"\nrgw_keystone_admin_project = \"${OS_PROJECT_NAME}\"\nrgw_keystone_admin_domain = \"${OS_USER_DOMAIN_NAME}\"\n{{ range $key, $value := .Values.conf.rgw_ks.config -}}\n{{- if kindIs \"slice\" $value -}}\n{{ $key }} = {{ include \"helm-toolkit.joinListWithComma\" $value | quote }}\n{{ else -}}\n{{ $key }} = {{ $value | quote  }}\n{{ end -}}\n{{- end -}}\n{{ end }}\n{{- if .Values.conf.rgw_s3.enabled }}\n{{- if .Values.manifests.certificates }}\nrgw_frontends = \"beast ssl_port=${RGW_FRONTEND_PORT} ssl_certificate=/etc/tls/tls.crt ssl_private_key=/etc/tls/tls.key\"\n{{- else }}\nrgw_frontends = \"beast port=${RGW_FRONTEND_PORT}\"\n{{- end }}\n{{ range $key, $value := .Values.conf.rgw_s3.config -}}\n{{- if kindIs \"slice\" $value -}}\n{{ $key }} = {{ include \"helm-toolkit.joinListWithComma\" $value | quote }}\n{{ else -}}\n{{ $key }} = {{ $value | quote  }}\n{{ end -}}\n{{- end -}}\n{{ end }}\nEOF\n"
  },
  {
    "path": "ceph-rgw/templates/bin/rgw/_rerun-pool-job.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n# Get the Ceph cluster namespace, assuming \"ceph\" if not defined\n{{- if empty .Values.endpoints.ceph_mon.namespace -}}\nCEPH_NS=ceph\n{{ else }}\nCEPH_NS={{ .Values.endpoints.ceph_mon.namespace }}\n{{- end }}\n\n# If the ceph-rbd pool job exists, delete it and re-create it\n# NOTE: This check is currently required to handle the Rook case properly.\n#       Other charts still deploy ceph-rgw outside of Rook, and Rook does not\n#       have a ceph-rbd-pool job to re-run.\nif [[ -n \"$(kubectl -n ${CEPH_NS} get jobs | grep ceph-rbd-pool)\" ]]\nthen\n  kubectl -n ${CEPH_NS} get job ceph-rbd-pool -o json > /tmp/ceph-rbd-pool.json\n  kubectl -n ${CEPH_NS} delete job ceph-rbd-pool\n  jq 'del(.spec.selector) |\n      del(.spec.template.metadata.creationTimestamp) |\n      del(.spec.template.metadata.labels) |\n      del(.metadata.creationTimestamp) |\n      del(.metadata.uid) |\n      del(.status)' /tmp/ceph-rbd-pool.json | \\\n  kubectl create -f -\n\n  while [[ -z \"$(kubectl -n ${CEPH_NS} get pods | grep ceph-rbd-pool | grep Completed)\" ]]\n  do\n    sleep 5\n  done\nfi\n"
  },
  {
    "path": "ceph-rgw/templates/bin/rgw/_start.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport LC_ALL=C\n: \"${CEPH_GET_ADMIN_KEY:=0}\"\n: \"${RGW_NAME:=$(uname -n)}\"\n: \"${RGW_ZONEGROUP:=}\"\n: \"${RGW_ZONE:=}\"\n: \"${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}\"\n: \"${RGW_KEYRING:=/var/lib/ceph/radosgw/${RGW_NAME}/keyring}\"\n: \"${RGW_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring}\"\n\nif [[ ! -e \"/etc/ceph/${CLUSTER}.conf\" ]]; then\n  echo \"ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon\"\n  exit 1\nfi\n\nif [ \"${CEPH_GET_ADMIN_KEY}\" -eq 1 ]; then\n  if [[ ! -e \"${ADMIN_KEYRING}\" ]]; then\n      echo \"ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon\"\n      exit 1\n  fi\nfi\n\n# Check to see if our RGW has been initialized\nif [ ! -e \"${RGW_KEYRING}\" ]; then\n\n  if [ ! -e \"${RGW_BOOTSTRAP_KEYRING}\" ]; then\n    echo \"ERROR- ${RGW_BOOTSTRAP_KEYRING} must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-rgw -o ${RGW_BOOTSTRAP_KEYRING}'\"\n    exit 1\n  fi\n\n  timeout 10 ceph --cluster \"${CLUSTER}\" --name \"client.bootstrap-rgw\" --keyring \"${RGW_BOOTSTRAP_KEYRING}\" health || exit 1\n\n  # Generate the RGW key\n  ceph --cluster \"${CLUSTER}\" --name \"client.bootstrap-rgw\" --keyring \"${RGW_BOOTSTRAP_KEYRING}\" auth get-or-create \"client.rgw.${RGW_NAME}\" osd 'allow rwx' mon 'allow rw' -o \"${RGW_KEYRING}\"\n  chown ceph. 
\"${RGW_KEYRING}\"\n  chmod 0600 \"${RGW_KEYRING}\"\nfi\n\n/usr/bin/radosgw \\\n  --cluster \"${CLUSTER}\" \\\n  --setuser \"ceph\" \\\n  --setgroup \"ceph\" \\\n  -d \\\n  -n \"client.rgw.${RGW_NAME}\" \\\n  -k \"${RGW_KEYRING}\" \\\n  --rgw-socket-path=\"\" \\\n  --rgw-zonegroup=\"${RGW_ZONEGROUP}\" \\\n  --rgw-zone=\"${RGW_ZONE}\"\n"
  },
  {
    "path": "ceph-rgw/templates/bin/utils/_checkDNS.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n: \"${CEPH_CONF:=\"/etc/ceph/${CLUSTER}.conf\"}\"\nENDPOINT=\"{$1}\"\n\nfunction check_mon_dns () {\n  GREP_CMD=$(grep -rl 'ceph-mon' ${CEPH_CONF})\n\n  if [[ \"${ENDPOINT}\" == \"{up}\" ]]; then\n    echo \"If DNS is working, we are good here\"\n  elif [[ \"${ENDPOINT}\" != \"\" ]]; then\n    if [[ ${GREP_CMD} != \"\" ]]; then\n      # No DNS, write CEPH MONs IPs into ${CEPH_CONF}\n      sh -c -e \"cat ${CEPH_CONF}.template | sed 's/mon_host.*/mon_host = ${ENDPOINT}/g' | tee ${CEPH_CONF}\" > /dev/null 2>&1\n    else\n      echo \"endpoints are already cached in ${CEPH_CONF}\"\n      exit\n    fi\n  fi\n}\n\ncheck_mon_dns\n\nexit\n"
  },
  {
    "path": "ceph-rgw/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{ $object_store_name := \"object_store\" }}\n{{- if .Values.conf.rgw_s3.enabled }}\n{{ $object_store_name = \"ceph_object_store\" }}\n{{- end }}\n{{- if .Values.manifests.certificates }}\n{{ dict \"envAll\" . \"service\" $object_store_name \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/configmap-bin-ks.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.configmap_bin_ks .Values.conf.rgw_ks.enabled }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: ceph-rgw-bin-ks\ndata:\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.configmap_bin .Values.deployment.ceph }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: ceph-rgw-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  rgw-restart.sh: |\n{{ tuple \"bin/_rgw-restart.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  init-dirs.sh: |\n{{ tuple \"bin/_init-dirs.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  rgw-start.sh: |\n{{ tuple \"bin/rgw/_start.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rgw-init.sh: |\n{{ tuple \"bin/rgw/_init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rerun-pool-job.sh: |\n{{ tuple \"bin/rgw/_rerun-pool-job.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  storage-init.sh: |\n{{ tuple \"bin/_ceph-rgw-storage-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ceph-admin-keyring.sh: |\n{{ tuple \"bin/_ceph-admin-keyring.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rgw-s3-admin.sh: |\n{{- include \"helm-toolkit.scripts.create_s3_user\" . 
| indent 4 }}\n  create-rgw-placement-targets.sh: |\n{{ tuple \"bin/_create-rgw-placement-targets.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  helm-tests.sh: |\n{{ tuple \"bin/_helm-tests.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  utils-checkDNS.sh: |\n{{ tuple \"bin/utils/_checkDNS.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/configmap-ceph-rgw-templates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.configmap_ceph_templates .Values.manifests.job_ceph_rgw_storage_init }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"ceph-templates\" | quote }}\ndata:\n  bootstrap.keyring.rgw: |\n{{ .Values.conf.templates.keyring.bootstrap.rgw | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/configmap-etc-client.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ceph.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if or (.Values.deployment.ceph) (.Values.deployment.client_secrets) }}\n\n{{- if empty .Values.conf.ceph.global.mon_host -}}\n{{- $monHost := tuple \"ceph_mon\" \"internal\" \"mon_msgr2\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n{{- $_ := $monHost | set .Values.conf.ceph.global \"mon_host\" -}}\n{{- end -}}\n\n\n{{- if empty .Values.conf.ceph.osd.cluster_network -}}\n{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd \"cluster_network\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ceph.osd.public_network -}}\n{{- $_ := .Values.network.public | set .Values.conf.ceph.osd \"public_network\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.rgw_ks.config.rgw_swift_url -}}\n{{- $_ := tuple \"object_store\" \"public\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | trimSuffix .Values.endpoints.object_store.path.default | set .Values.conf.rgw_ks.config \"rgw_swift_url\" -}}\n{{- end -}}\n\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ $configMapName }}\ndata:\n  ceph.conf: |\n{{ include \"helm-toolkit.utils.to_ini\" .Values.conf.ceph | indent 4 }}\n\n{{- end }}\n{{- end }}\n{{- end }}\n{{- if .Values.manifests.configmap_etc }}\n{{- list \"ceph-rgw-etc\" . 
| include \"ceph.configmap.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/deployment-rgw.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"readinessProbeTemplate\" }}\n{{- $object_store_name := \"object_store\" }}\n{{- if .Values.conf.rgw_s3.enabled }}\n{{ $object_store_name = \"ceph_object_store\" }}\n{{- end }}\nhttpGet:\n  path: /\n  port: {{ tuple $object_store_name \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  scheme: {{ tuple $object_store_name \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n{{- end }}\n\n{{- define \"livenessProbeTemplate\" }}\n{{- $object_store_name := \"object_store\" }}\n{{- if .Values.conf.rgw_s3.enabled }}\n{{ $object_store_name = \"ceph_object_store\" }}\n{{- end }}\nhttpGet:\n  path: /\n  port: {{ tuple $object_store_name \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  scheme: {{ tuple $object_store_name \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n{{- end }}\n\n{{- if and .Values.manifests.deployment_rgw ( and .Values.deployment.ceph .Values.conf.features.rgw ) }}\n{{- $envAll := . 
}}\n\n{{ $object_store_name := \"object_store\" }}\n{{ $tls_secret := .Values.secrets.tls.object_store.api.internal | quote }}\n{{- if .Values.conf.rgw_s3.enabled }}\n{{ $object_store_name = \"ceph_object_store\" }}\n{{ $tls_secret = .Values.secrets.tls.ceph_object_store.api.internal | quote }}\n{{- end }}\n\n{{- $serviceAccountName := \"ceph-rgw\" }}\n{{- $checkDnsServiceAccountName := \"ceph-checkdns\" }}\n\n{{- $_ := set $envAll.Values \"__depParams\" ( list ) }}\n{{- if .Values.conf.rgw_ks.enabled -}}\n{{- $__updateDepParams := append $envAll.Values.__depParams \"keystone\" -}}\n{{- $_ := set $envAll.Values \"__depParams\" $__updateDepParams -}}\n{{- end -}}\n{{- if .Values.conf.rgw_s3.enabled -}}\n{{- $__updateDepParams := append $envAll.Values.__depParams \"s3\" -}}\n{{- $_ := set $envAll.Values \"__depParams\" $__updateDepParams -}}\n{{- end -}}\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.__depParams \"dependencyKey\" \"rgw\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceAccountName $envAll.Release.Namespace }}\n  namespace: {{ .Values.endpoints.ceph_mon.namespace }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - endpoints\n    verbs:\n      - get\n      - list\n      - watch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceAccountName $envAll.Release.Namespace }}\n  namespace: {{ .Values.endpoints.ceph_mon.namespace }}\nroleRef:\n  kind: Role\n  name: {{ printf \"%s-%s\" $serviceAccountName $envAll.Release.Namespace }}\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    
namespace: {{ $envAll.Release.Namespace }}\n---\n# This role bindig refers to the ClusterRole for\n# check-dns deployment.\n# See: openstack-helm/ceph-client/deployment-checkdns.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ printf \"%s-from-%s-to-%s\" $checkDnsServiceAccountName $envAll.Values.endpoints.ceph_mon.namespace $envAll.Release.Namespace }}\n  namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: clusterrole-checkdns\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $checkDnsServiceAccountName }}\n    namespace:  {{ .Values.endpoints.ceph_mon.namespace }}\n---\nkind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: ceph-rgw\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ceph\" \"rgw\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.rgw }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ceph\" \"rgw\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"rgw\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-client-hash: {{ tuple \"configmap-etc-client.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        secret-keystone-rgw-hash: {{ tuple \"secret-keystone-rgw.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-rgw\" \"containerNames\" (list \"init\" \"ceph-rgw\" \"ceph-init-dirs\" \"ceph-rgw-init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"rgw\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"ceph\" \"rgw\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ tuple $envAll \"rgw\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n      nodeSelector:\n        {{ .Values.labels.rgw.node_selector_key }}: {{ .Values.labels.rgw.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: ceph-init-dirs\n{{ tuple $envAll \"ceph_rgw\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"rgw\" \"container\" \"init_dirs\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/init-dirs.sh\n          env:\n            - name: CLUSTER\n              value: \"ceph\"\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-rgw-bin\n              mountPath: /tmp/init-dirs.sh\n              subPath: init-dirs.sh\n              readOnly: true\n            - name: pod-var-lib-ceph\n              mountPath: /var/lib/ceph\n              readOnly: false\n        - name: ceph-rgw-init\n{{ tuple $envAll 
\"ceph_rgw\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.rgw | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"rgw\" \"container\" \"rgw_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: CLUSTER\n              value: \"ceph\"\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n{{ if .Values.conf.rgw_ks.enabled }}\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.user_rgw \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n            - name: KEYSTONE_URL\n              value: {{ tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | trimSuffix .Values.endpoints.identity.path.default | quote }}\n{{ end }}\n            - name: RGW_FRONTEND_PORT\n              value: \"{{ tuple $object_store_name \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\"\n          command:\n            - /tmp/rgw-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-rgw-bin\n              mountPath: /tmp/rgw-init.sh\n              subPath: rgw-init.sh\n              readOnly: true\n            - name: ceph-rgw-etc\n              mountPath: /etc/ceph/ceph.conf.template\n              subPath: ceph.conf\n              readOnly: true\n{{- if .Values.conf.rgw_ks.enabled }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.object_store.api.keystone | include 
\"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- end }}\n      containers:\n        - name: ceph-rgw\n{{ tuple $envAll \"ceph_rgw\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.rgw | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"rgw\" \"container\" \"rgw\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: CLUSTER\n              value: \"ceph\"\n            - name: RGW_FRONTEND_PORT\n              value: \"{{ tuple $object_store_name \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\"\n          command:\n            - /tmp/rgw-start.sh\n          ports:\n            - containerPort: {{ tuple $object_store_name \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" . \"component\" \"api\" \"container\" \"ceph-rgw\" \"type\" \"liveness\" \"probeTemplate\" (include \"livenessProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"api\" \"container\" \"ceph-rgw\" \"type\" \"readiness\" \"probeTemplate\" (include \"readinessProbeTemplate\" . 
| fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-rgw-bin\n              mountPath: /tmp/rgw-start.sh\n              subPath: rgw-start.sh\n              readOnly: true\n            - name: ceph-rgw-bin\n              mountPath: /tmp/utils-checkDNS.sh\n              subPath: utils-checkDNS.sh\n              readOnly: true\n            - name: ceph-rgw-etc\n              mountPath: /etc/ceph/ceph.conf.template\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-bootstrap-rgw-keyring\n              mountPath: /var/lib/ceph/bootstrap-rgw/ceph.keyring\n              subPath: ceph.keyring\n              readOnly: false\n            - name: pod-var-lib-ceph\n              mountPath: /var/lib/ceph\n              readOnly: false\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" $tls_secret \"path\" \"/etc/tls\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-run\n          emptyDir:\n            medium: \"Memory\"\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-rgw-bin\n          configMap:\n            name: ceph-rgw-bin\n            defaultMode: 0555\n        - name: ceph-rgw-etc\n          configMap:\n            name: ceph-rgw-etc\n            defaultMode: 0444\n        - name: pod-var-lib-ceph\n          emptyDir: {}\n        - name: ceph-bootstrap-rgw-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.rgw }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" $tls_secret | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- if .Values.conf.rgw_ks.enabled }}\n{{- 
dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.object_store.api.keystone | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "ceph-rgw/templates/ingress-rgw.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_rgw ( and .Values.deployment.ceph (and .Values.network.api.ingress.public .Values.conf.features.rgw) ) }}\n{{- $ingressOpts := dict \"envAll\" . \"backendServiceType\" \"object_store\"  \"backendPort\" \"ceph-rgw\" -}}\n{{- if .Values.manifests.certificates }}\n{{- if .Values.conf.rgw_ks.enabled }}\n{{- $ingressOpts = dict \"envAll\" . \"backendServiceType\" \"object_store\" \"backendPort\" \"ceph-rgw\" \"certIssuer\" .Values.endpoints.object_store.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end }}\n{{- if .Values.conf.rgw_s3.enabled }}\n{{- $ingressOpts = dict \"envAll\" . \"backendServiceType\" \"ceph_object_store\" \"backendPort\" \"ceph-rgw\" \"certIssuer\" .Values.endpoints.ceph_object_store.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end }}\n{{- end }}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ceph-rgw-bootstrap\" }}\n{{ tuple $envAll \"bootstrap\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: ceph-rgw-bootstrap\n  labels:\n{{ tuple $envAll \"ceph\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict 
\"envAll\" $envAll \"podName\" \"ceph-rgw-bootstrap\" \"containerNames\" (list \"ceph-keyring-placement\" \"init\" \"ceph-rgw-bootstrap\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"bootstrap\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" \"container\" \"keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/ceph-admin-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-rgw-bin\n              mountPath: /tmp/ceph-admin-keyring.sh\n              subPath: ceph-admin-keyring.sh\n              readOnly: true\n            - name: ceph-rgw-admin-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      containers:\n        - name: ceph-rgw-bootstrap\n{{ tuple $envAll \"ceph_bootstrap\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" \"container\" \"bootstrap\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/bootstrap.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-rgw-bin\n              mountPath: /tmp/bootstrap.sh\n              subPath: bootstrap.sh\n              readOnly: true\n            - name: ceph-rgw-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-rgw-admin-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-rgw-bin\n          configMap:\n            name: ceph-rgw-bin\n            defaultMode: 0555\n        - name: ceph-rgw-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n            defaultMode: 0444\n        - name: ceph-rgw-admin-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin | quote }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"ceph-rgw\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_ks_endpoints .Values.conf.rgw_ks.enabled }}\n{{- $ksServiceJob := dict \"envAll\" . \"configMapBin\" \"ceph-rgw-bin-ks\" \"serviceName\" \"ceph\" \"serviceTypes\" ( tuple \"object-store\" ) -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.object_store.api.internal -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_ks_service .Values.conf.rgw_ks.enabled }}\n{{- $ksServiceJob := dict \"envAll\" . \"configMapBin\" \"ceph-rgw-bin-ks\" \"serviceName\" \"ceph\" \"serviceTypes\" ( tuple \"object-store\" ) -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.object_store.api.internal -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_ks_user .Values.conf.rgw_ks.enabled }}\n{{- $ksUserJob := dict \"envAll\" . \"configMapBin\" \"ceph-rgw-bin-ks\" \"serviceName\" \"ceph\" \"serviceUser\" \"swift\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.object_store.api.internal -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/job-rgw-placement-targets.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_rgw_placement_targets .Values.conf.features.rgw }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"rgw-placement-targets\" }}\n{{ tuple $envAll \"rgw_placement_targets\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: ceph-rgw-placement-targets\n  labels:\n{{ tuple $envAll \"ceph\" \"rgw-placement-targets\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"rgw-placement-targets\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include 
\"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-rgw-placement-targets\" \"containerNames\" (list \"ceph-keyring-placement\" \"init\" \"create-rgw-placement-targets\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"rgw_placement_targets\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"rgw_placement_targets\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"rgw_placement_targets\" \"container\" \"keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/ceph-admin-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-rgw-bin\n              mountPath: /tmp/ceph-admin-keyring.sh\n              subPath: ceph-admin-keyring.sh\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      containers:\n        - name: create-rgw-placement-targets\n          image: {{ .Values.images.tags.rgw_placement_targets }}\n          imagePullPolicy: {{ .Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.rgw_placement_targets | include 
\"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"rgw_placement_targets\" \"container\" \"create_rgw_placement_targets\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/create-rgw-placement-targets.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-rgw-bin\n              mountPath: /tmp/create-rgw-placement-targets.sh\n              subPath: create-rgw-placement-targets.sh\n              readOnly: true\n            - name: ceph-rgw-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-rgw-bin\n          configMap:\n            name: ceph-rgw-bin\n            defaultMode: 0555\n        - name: ceph-rgw-etc\n          configMap:\n            name: ceph-rgw-etc\n            defaultMode: 0444\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin | quote }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/job-rgw-pool.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# This job is required for Reef and later because Ceph now disallows the\n# creation of internal pools (pool names beginning with a \".\") and the\n# ceph-rbd-pool job therefore can't configure them if they don't yet exist.\n# This job simply deletes and re-creates the ceph-rbd-pool job after deploying\n# ceph-rgw so it can apply the correct configuration to the .rgw.root pool.\n\n{{- if and .Values.manifests.job_rgw_pool .Values.deployment.ceph }}\n{{- $envAll := . 
}}\n\n{{- $serviceAccountName := \"ceph-rgw-pool\" }}\n{{ tuple $envAll \"rgw_pool\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}-{{ $envAll.Release.Namespace }}\nrules:\n  - apiGroups:\n      - ''\n    resources:\n      - pods\n      - jobs\n    verbs:\n      - create\n      - get\n      - delete\n      - list\n  - apiGroups:\n      - 'batch'\n    resources:\n      - jobs\n    verbs:\n      - create\n      - get\n      - delete\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}-{{ $envAll.Release.Namespace }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}-{{ $envAll.Release.Namespace }}\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: ceph-rgw-pool\n  labels:\n{{ tuple $envAll \"ceph\" \"rbd-pool\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      name: ceph-rgw-pool\n      labels:\n{{ tuple $envAll \"ceph\" \"rbd-pool\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-rgw-pool\" \"containerNames\" (list \"ceph-rgw-pool\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"rgw_pool\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: {{ 
$envAll.Values.jobs.rgw_pool.restartPolicy | quote }}\n      affinity:\n{{ tuple $envAll \"ceph\" \"rbd-pool\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"rgw_pool\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: ceph-rgw-pool\n{{ tuple $envAll \"ceph_rgw_pool\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.rgw_pool | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"rgw_pool\" \"container\" \"rgw_pool\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/rerun-pool-job.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ceph-rgw-bin\n              mountPath: /tmp/rerun-pool-job.sh\n              subPath: rerun-pool-job.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: ceph-rgw-bin\n          configMap:\n            name: ceph-rgw-bin\n            defaultMode: 0555\n        - name: pod-run\n          emptyDir:\n            medium: \"Memory\"\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/job-rgw-restart.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_rgw_restart }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"rgw-restart\" }}\n{{ tuple $envAll \"rgw_restart\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - 'apps'\n    resources:\n      - deployments\n    verbs:\n      - get\n      - list\n      - update\n      - patch\n      - watch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: ceph-rgw-restart\n  labels:\n{{ tuple $envAll \"ceph\" \"rgw-restart\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"rgw-restart\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include 
\"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-rgw-restart\" \"containerNames\" (list \"init\" \"ceph-rgw-restart\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"rgw_restart\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"rgw_restart\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: ceph-rgw-restart\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.rgw_restart | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"rgw_restart\" \"container\" \"ceph-rgw-restart\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/rgw-restart.sh\n          volumeMounts:\n            - name: ceph-rgw-bin\n              mountPath: /tmp/rgw-restart.sh\n              subPath: rgw-restart.sh\n              readOnly: true\n      volumes:\n        - name: ceph-rgw-bin\n          configMap:\n            name: ceph-rgw-bin\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/job-rgw-storage-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_ceph_rgw_storage_init .Values.deployment.ceph }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ceph-rgw-storage-init\" }}\n{{ tuple $envAll \"rgw_storage_init\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: ceph-rgw-storage-init\n  labels:\n{{ tuple $envAll \"ceph\" \"rgw-storage-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"rgw-storage-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" 
\"ceph-rgw-storage-init\" \"containerNames\" (list \"ceph-keyring-placement\" \"init\" \"ceph-rgw-storage-init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"rgw_storage_init\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"rgw_storage_init\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"rgw_storage_init\" \"container\" \"keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/ceph-admin-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-rgw-bin\n              mountPath: /tmp/ceph-admin-keyring.sh\n              subPath: ceph-admin-keyring.sh\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      containers:\n        - name: ceph-rgw-storage-init\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.rgw_storage_init | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"rgw_storage_init\" \"container\" \"rgw_storage_init\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: STORAGE_BACKEND\n              value: \"ceph-rgw\"\n          command:\n            - /tmp/storage-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-rgw-bin\n              mountPath: /tmp/storage-init.sh\n              subPath: storage-init.sh\n              readOnly: true\n            - name: ceph-templates\n              mountPath: /tmp/ceph-templates\n              readOnly: true\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-rgw-bin\n          configMap:\n            name: ceph-rgw-bin\n            defaultMode: 0555\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n            defaultMode: 0444\n        - name: ceph-templates\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"ceph-templates\" | quote }}\n            defaultMode: 0444\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin | quote }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/job-s3-admin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_s3_admin ( and .Values.conf.features.rgw .Values.conf.rgw_s3.enabled ) }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"rgw-s3-admin\" }}\n{{ tuple $envAll \"rgw_s3_admin\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $s3AdminSecret := .Values.secrets.rgw_s3.admin }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: ceph-rgw-s3-admin\n  labels:\n{{ tuple $envAll \"ceph\" \"rgw-s3-admin\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ceph\" \"rgw-s3-admin\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      
annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-rgw-s3-admin\" \"containerNames\" (list \"ceph-keyring-placement\" \"init\" \"create-s3-admin\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"rgw_s3_admin\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"rgw_s3_admin\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"rgw_s3_admin\" \"container\" \"keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/ceph-admin-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-rgw-bin\n              mountPath: /tmp/ceph-admin-keyring.sh\n              subPath: ceph-admin-keyring.sh\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      containers:\n        - name: create-s3-admin\n          image: {{ .Values.images.tags.rgw_s3_admin }}\n          imagePullPolicy: {{ .Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.rgw_s3_admin | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ 
dict \"envAll\" $envAll \"application\" \"rgw_s3_admin\" \"container\" \"create_s3_admin\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: S3_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $s3AdminSecret }}\n                  key: S3_ADMIN_USERNAME\n            - name: S3_ACCESS_KEY\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $s3AdminSecret }}\n                  key: S3_ADMIN_ACCESS_KEY\n            - name: S3_SECRET_KEY\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $s3AdminSecret }}\n                  key: S3_ADMIN_SECRET_KEY\n          command:\n            - /tmp/rgw-s3-admin.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-ceph\n              mountPath: /etc/ceph\n            - name: ceph-rgw-bin\n              mountPath: /tmp/rgw-s3-admin.sh\n              subPath: rgw-s3-admin.sh\n              readOnly: true\n            - name: ceph-rgw-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-ceph\n          emptyDir: {}\n        - name: ceph-rgw-bin\n          configMap:\n            name: ceph-rgw-bin\n            defaultMode: 0555\n        - name: ceph-rgw-etc\n          configMap:\n            name: ceph-rgw-etc\n            defaultMode: 0444\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.secrets.keyrings.admin | quote }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/network_policy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"key\" \"rgw\" \"labels\" (dict \"application\" \"ceph\" \"component\" \"rgw\") -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "ceph-rgw/templates/pod-helm-tests.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if and .Values.manifests.helm_tests .Values.deployment.ceph }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := printf \"%s-%s\" $envAll.Release.Name \"test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ $serviceAccountName }}\n  labels:\n{{ tuple $envAll \"ceph\" \"rgw-test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n{{ dict \"envAll\" $envAll \"podName\" \"ceph-rgw-test\" \"containerNames\" (list \"ceph-rgw-ks-validation\" \"ceph-rgw-s3-validation\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n{{ dict \"envAll\" $envAll \"application\" \"rgw_test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  restartPolicy: Never\n  serviceAccountName: {{ $serviceAccountName }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  containers:\n{{ if .Values.conf.rgw_ks.enabled }}\n    - name: ceph-rgw-ks-validation\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict 
\"envAll\" $envAll \"application\" \"rgw_test\" \"container\" \"ceph_rgw_ks_validation\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.user_rgw \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n        - name: OS_AUTH_TYPE\n          valueFrom:\n            secretKeyRef:\n              name: {{ $.Values.secrets.identity.user_rgw }}\n              key: OS_AUTH_TYPE\n        - name: OS_TENANT_NAME\n          valueFrom:\n            secretKeyRef:\n              name: {{ $.Values.secrets.identity.user_rgw }}\n              key: OS_TENANT_NAME\n{{- end }}\n        - name: \"RGW_TEST_TYPE\"\n          value: \"RGW_KS\"\n      command:\n        - /tmp/helm-tests.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: pod-etc-ceph\n          mountPath: /etc/ceph\n        - name: ceph-rgw-bin\n          mountPath: /tmp/helm-tests.sh\n          subPath: helm-tests.sh\n          readOnly: true\n        - name: ceph-keyring\n          mountPath: /tmp/client-keyring\n          subPath: key\n          readOnly: true\n        - name: ceph-rgw-etc\n          mountPath: /etc/ceph/ceph.conf\n          subPath: ceph.conf\n          readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.object_store.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 8 }}\n{{- end }}\n{{ if .Values.conf.rgw_s3.enabled }}\n    - name: ceph-rgw-s3-validation\n{{ tuple $envAll \"ceph_rgw\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"rgw_test\" \"container\" \"ceph_rgw_s3_validation\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      env:\n{{- with $env := dict \"s3AdminSecret\" $envAll.Values.secrets.rgw_s3.admin }}\n{{- include \"helm-toolkit.snippets.rgw_s3_admin_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: RGW_HOST\n          value: {{ tuple \"ceph_object_store\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n        - name: \"RGW_TEST_TYPE\"\n          value: \"RGW_S3\"\n      command:\n        - /tmp/helm-tests.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: pod-etc-ceph\n          mountPath: /etc/ceph\n        - name: ceph-rgw-bin\n          mountPath: /tmp/helm-tests.sh\n          subPath: helm-tests.sh\n          readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.ceph_object_store.api.internal \"path\" \"/etc/tls\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 8 }}\n{{- end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: pod-etc-ceph\n      emptyDir: {}\n    - name: ceph-rgw-bin\n      configMap:\n        name: ceph-rgw-bin\n        defaultMode: 0555\n    - name: ceph-keyring\n      secret:\n        secretName: {{ .Values.secrets.keyrings.admin | quote }}\n    - name: ceph-rgw-etc\n      configMap:\n        name: ceph-rgw-etc\n        defaultMode: 0444\n{{- if .Values.conf.rgw_ks.enabled }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.object_store.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{- end }}\n{{- if .Values.conf.rgw_s3.enabled }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.ceph_object_store.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_ingress_tls ( and .Values.deployment.ceph .Values.conf.features.rgw ) }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"object_store\" ) }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/secret-keystone-rgw.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_keystone_rgw .Values.deployment.ceph }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"swift\" }}\n{{- $secretName := index $envAll.Values.secrets.identity \"user_rgw\" }}\n{{- $auth := index $envAll.Values.endpoints.identity.auth $userClass }}\n{{ $osAuthType := $auth.os_auth_type }}\n{{ $osTenantName := $auth.os_tenant_name }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 }}\n  OS_AUTH_TYPE: {{ $osAuthType  | b64enc }}\n  OS_TENANT_NAME: {{ $osTenantName | b64enc }}\n{{ end }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_keystone .Values.conf.rgw_ks.enabled }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"swift\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/secret-s3-rgw.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_s3_rgw }}\n{{- $envAll := . }}\n{{- $secretName := index $envAll.Values.secrets.rgw_s3.admin }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  S3_ADMIN_USERNAME: {{ .Values.endpoints.ceph_object_store.auth.admin.username | b64enc }}\n  S3_ADMIN_ACCESS_KEY: {{ .Values.endpoints.ceph_object_store.auth.admin.access_key | b64enc }}\n  S3_ADMIN_SECRET_KEY: {{ .Values.endpoints.ceph_object_store.auth.admin.secret_key | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/service-ingress-rgw.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{ $object_store_name := \"object_store\" }}\n{{- if .Values.conf.rgw_s3.enabled }}\n{{ $object_store_name = \"ceph_object_store\" }}\n{{- end }}\n\n{{- if and .Values.manifests.service_ingress_rgw ( and .Values.deployment.ceph (and .Values.network.api.ingress.public .Values.conf.features.rgw ) ) }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" $object_store_name -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/templates/service-rgw.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_rgw ( and .Values.deployment.ceph .Values.conf.features.rgw ) }}\n{{- $envAll := . }}\n{{ $object_store_name := \"object_store\" }}\n{{- if .Values.conf.rgw_s3.enabled }}\n{{ $object_store_name = \"ceph_object_store\" }}\n{{- end }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: ceph-rgw\nspec:\n  ports:\n  - name: ceph-rgw\n    port: {{ tuple $object_store_name \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    protocol: TCP\n    targetPort: {{ tuple $object_store_name \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  {{ if .Values.network.api.node_port.enabled }}\n    nodePort: {{ .Values.network.api.node_port.port }}\n  {{ end }}\n  selector:\n{{ tuple $envAll \"ceph\" \"rgw\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "ceph-rgw/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for ceph-client.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\ndeployment:\n  ceph: false\n\nrelease_group: null\n\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    ceph_bootstrap: 'quay.io/airshipit/ceph-daemon:ubuntu_jammy_20.2.1-1-20260407'\n    ceph_config_helper: 'quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407'\n    ceph_rgw: 'quay.io/airshipit/ceph-daemon:ubuntu_jammy_20.2.1-1-20260407'\n    ceph_rgw_pool: 'quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407'\n    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy'\n    image_repo_sync: 'quay.io/airshipit/docker:27.5.0'\n    rgw_s3_admin: 'quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407'\n    rgw_placement_targets: 'quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407'\n    ks_endpoints: 'quay.io/airshipit/openstack-client:2025.1-ubuntu_noble'\n    ks_service: 'quay.io/airshipit/openstack-client:2025.1-ubuntu_noble'\n    ks_user: 'quay.io/airshipit/openstack-client:2025.1-ubuntu_noble'\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: 
enabled\n  rgw:\n    node_selector_key: ceph-rgw\n    node_selector_value: enabled\n\npod:\n  security_context:\n    rgw:\n      pod:\n        runAsUser: 64045\n      container:\n        init_dirs:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        rgw_init:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        rgw:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    rgw_storage_init:\n      pod:\n        runAsUser: 64045\n      container:\n        keyring_placement:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        rgw_storage_init:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    rgw_restart:\n      pod:\n        runAsUser: 65534\n      container:\n        ceph-rgw-restart:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    rgw_s3_admin:\n      pod:\n        runAsUser: 64045\n      container:\n        keyring_placement:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        create_s3_admin:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    rgw_placement_targets:\n      pod:\n        runAsUser: 64045\n      container:\n        keyring_placement:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        create_rgw_placement_targets:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    rgw_test:\n      pod:\n        runAsUser: 64045\n      container:\n        ceph_rgw_ks_validation:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        ceph_rgw_s3_validation:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    bootstrap:\n      pod:\n        runAsUser: 65534\n      container:\n        keyring_placement:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        
bootstrap:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    rgw_pool:\n      pod:\n        runAsUser: 65534\n      container:\n        rgw_pool:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  dns_policy: \"ClusterFirstWithHostNet\"\n  replicas:\n    rgw: 2\n  lifecycle:\n    upgrades:\n      deployments:\n        pod_replacement_strategy: RollingUpdate\n        revision_history: 3\n        rolling_update:\n          max_surge: 50%\n          max_unavailable: 50%\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  resources:\n    enabled: false\n    rgw:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"512Mi\"\n        cpu: \"1000m\"\n    jobs:\n      bootstrap:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"500m\"\n      ceph-rgw-storage-init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rgw_s3_admin:\n        requests:\n          memory: 
\"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rgw_placement_targets:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rgw_restart:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"500m\"\n      rgw_pool:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n    tests:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n  tolerations:\n    rgw:\n      tolerations:\n      - effect: NoExecute\n        key: node.kubernetes.io/not-ready\n        operator: Exists\n        tolerationSeconds: 60\n      - effect: NoExecute\n        key: node.kubernetes.io/unreachable\n        operator: Exists\n        tolerationSeconds: 60\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  probes:\n    api:\n      ceph-rgw:\n        readiness:\n          enabled: true\n          params:\n            timeoutSeconds: 5\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 120\n            timeoutSeconds: 5\n\nnetwork_policy:\n  rgw:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nceph_client:\n  configmap: ceph-etc\n\nsecrets:\n  keyrings:\n    mon: ceph-mon-keyring\n    mds: ceph-bootstrap-mds-keyring\n    osd: ceph-bootstrap-osd-keyring\n    rgw: os-ceph-bootstrap-rgw-keyring\n    mgr: ceph-bootstrap-mgr-keyring\n    admin: pvc-ceph-client-key\n  identity:\n    admin: ceph-keystone-admin\n    swift: ceph-keystone-user\n    user_rgw: ceph-keystone-user-rgw\n  
oci_image_registry:\n    ceph-rgw: ceph-rgw-oci-image-registry-key\n  rgw_s3:\n    admin: radosgw-s3-admin-creds\n  tls:\n    object_store:\n      api:\n        public: ceph-tls-public\n        internal: ceph-rgw-ks-tls-api\n        keystone: keystone-tls-api\n    ceph_object_store:\n      api:\n        public: ceph-rgw-s3-tls-public\n        internal: ceph-rgw-s3-tls-api\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        nginx.ingress.kubernetes.io/proxy-body-size: \"0\"\n        nginx.ingress.kubernetes.io/proxy-max-temp-file-size: \"0\"\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30004\n  public: 192.168.0.0/16\n  cluster: 192.168.0.0/16\n\nconf:\n  templates:\n    keyring:\n      admin: |\n        [client.admin]\n          key = {{ key }}\n          auid = 0\n          caps mds = \"allow\"\n          caps mon = \"allow *\"\n          caps osd = \"allow *\"\n          caps mgr = \"allow *\"\n      bootstrap:\n        rgw: |\n          [client.bootstrap-rgw]\n            key = {{ key }}\n            caps mgr = \"allow profile bootstrap-rgw\"\n  features:\n    rgw: true\n  pool:\n    # NOTE(portdirect): this drives a simple approximation of\n    # https://ceph.com/pgcalc/, the `target.osd` key should be set to match the\n    # expected number of osds in a cluster, and the `target.pg_per_osd` should be\n    # set to match the desired number of placement groups on each OSD.\n    crush:\n      # NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series\n      # kernel this should be set to `hammer`\n      tunables: null\n    target:\n      # NOTE(portdirect): arbitrarily we set the default number of expected OSD's to 5\n      # to match the number of nodes in the OSH gate.\n      osd: 5\n      
pg_per_osd: 100\n    default:\n      # NOTE(portdirect): this should be 'same_host' for a single node\n      # cluster to be in a healthy state\n      crush_rule: replicated_rule\n    # NOTE(portdirect): this section describes the pools that will be managed by\n    # the ceph pool management job, as it tunes the pgs and crush rule, based on\n    # the above.\n    spec:\n      # RBD pool\n      - name: rbd\n        application: rbd\n        replication: 3\n        percent_total_data: 40\n      # CephFS pools\n      - name: cephfs_metadata\n        application: cephfs\n        replication: 3\n        percent_total_data: 5\n      - name: cephfs_data\n        application: cephfs\n        replication: 3\n        percent_total_data: 10\n      # RadosGW pools\n      - name: .rgw.root\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.control\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.data.root\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.gc\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.log\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.intent-log\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.meta\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.usage\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.users.keys\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.users.email\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.users.swift\n        application: rgw\n   
     replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.users.uid\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.buckets.extra\n        application: rgw\n        replication: 3\n        percent_total_data: 0.1\n      - name: default.rgw.buckets.index\n        application: rgw\n        replication: 3\n        percent_total_data: 3\n      - name: default.rgw.buckets.data\n        application: rgw\n        replication: 3\n        percent_total_data: 34.8\n  rgw_placement_targets:\n    - name: default-placement\n      data_pool: default.rgw.buckets.data\n      # Set 'delete' to true to delete an existing placement target. A\n      # non-existent placement target will be created and deleted in a single\n      # step.\n      # delete: true\n  rgw:\n    config:\n      # NOTE (portdirect): See http://tracker.ceph.com/issues/21226\n      rgw_keystone_token_cache_size: 0\n      # NOTE (JCL): See http://tracker.ceph.com/issues/7073\n      rgw_gc_max_objs: 997\n      # NOTE (JCL): See http://tracker.ceph.com/issues/24937\n      # NOTE (JCL): See https://tracker.ceph.com/issues/24551\n      rgw_dynamic_resharding: false\n      rgw_override_bucket_index_max_shards: 8\n  rgw_restart:\n    timeout: 600\n  rgw_ks:\n    enabled: false\n    config:\n      rgw_keystone_api_version: 3\n      rgw_keystone_accepted_roles: \"admin, member\"\n      rgw_keystone_implicit_tenants: true\n      rgw_keystone_make_new_tenants: true\n      rgw_s3_auth_use_keystone: true\n      rgw_s3_auth_order: \"local, external, sts\"\n      rgw_swift_account_in_url: true\n      rgw_swift_url: null\n  rgw_s3:\n    enabled: false\n    admin_caps: \"users=*;buckets=*;zone=*\"\n    config:\n      # NOTE (supamatt): Unfortunately we do not conform to S3 compliant names with some of our charts\n      rgw_relaxed_s3_bucket_names: true\n  ceph:\n    global:\n      # auth\n      cephx: true\n      cephx_require_signatures: false\n    
  cephx_cluster_require_signatures: true\n      cephx_service_require_signatures: false\n      objecter_inflight_op_bytes: \"1073741824\"\n      debug_ms: \"0/0\"\n      log_file: /dev/stdout\n      mon_cluster_log_file: /dev/stdout\n      # CNTT certification required fields\n      rgw_max_attr_name_len: 64\n      rgw_max_attrs_num_in_req: 32\n      rgw_max_attr_size: 1024\n      rgw_swift_versioning_enabled: true\n    osd:\n      osd_mkfs_type: xfs\n      osd_mkfs_options_xfs: -f -i size=2048\n      osd_max_object_name_len: 256\n      ms_bind_port_min: 6800\n      ms_bind_port_max: 7100\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - ceph-rgw-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n    targeted:\n      keystone:\n        rgw:\n          services:\n            - endpoint: internal\n              service: identity\n      s3:\n        rgw: {}\n  static:\n    rgw:\n      jobs:\n        - ceph-rgw-storage-init\n    rgw_restart:\n      services:\n        - endpoint: internal\n          service: ceph_object_store\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    ks_endpoints:\n      jobs:\n        - ceph-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rgw_s3_admin:\n      services:\n        - endpoint: internal\n          service: ceph_object_store\n    rgw_placement_targets:\n      services:\n        - endpoint: internal\n          service: ceph_object_store\n    rgw_pool:\n      jobs:\n        - ceph-rgw-storage-init\n    tests:\n      services:\n        - endpoint: internal\n          service: ceph_object_store\n\nbootstrap:\n  enabled: false\n  script: |\n    ceph 
-s\n    function ensure_pool () {\n      ceph osd pool stats $1 || ceph osd pool create $1 $2\n      if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then\n        ceph osd pool application enable $1 $3\n      fi\n    }\n    #ensure_pool volumes 8 cinder\n\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      ceph-rgw:\n        username: ceph-rgw\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    namespace: null\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n        os_auth_type: password\n        os_tenant_name: admin\n      swift:\n        role: admin\n        region_name: RegionOne\n        username: swift\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n        os_auth_type: password\n        os_tenant_name: admin\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  object_store:\n    name: swift\n    namespace: null\n    hosts:\n      default: ceph-rgw\n      public: radosgw\n    host_fqdn_override:\n      default: null\n      # NOTE(portdirect): this 
chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: /swift/v1/KEY_$(tenant_id)s\n    scheme:\n      default: http\n    port:\n      api:\n        default: 8088\n        public: 80\n  ceph_object_store:\n    name: radosgw\n    namespace: null\n    auth:\n      admin:\n        # NOTE(srwilkers): These defaults should be used for testing only, and\n        # should be changed before deploying to production\n        username: s3_admin\n        access_key: \"admin_access_key\"\n        secret_key: \"admin_secret_key\"\n    hosts:\n      default: ceph-rgw\n      public: radosgw\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      api:\n        default: 8088\n        public: 80\n  ceph_mon:\n    namespace: null\n    hosts:\n      default: ceph-mon\n      discovery: ceph-mon-discovery\n    host_fqdn_override:\n      default: null\n    port:\n      mon:\n        default: 6789\n      mon_msgr2:\n        default: 3300\n\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns_tcp:\n        default: 53\n      dns:\n        default: 53\n        protocol: UDP\n\njobs:\n  rgw_pool:\n    restartPolicy: OnFailure\n\nmanifests:\n  certificates: false\n  configmap_ceph_templates: true\n  configmap_bin: true\n  configmap_bin_ks: true\n  configmap_test_bin: true\n  configmap_etc: true\n  deployment_rgw: true\n  ingress_rgw: true\n  job_bootstrap: false\n  job_rgw_restart: false\n  job_ceph_rgw_storage_init: true\n  job_image_repo_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_s3_admin: true\n  job_rgw_placement_targets: false\n  
job_rgw_pool: true\n  secret_s3_rgw: true\n  secret_keystone_rgw: true\n  secret_ingress_tls: true\n  secret_keystone: true\n  secret_registry: true\n  service_ingress_rgw: true\n  service_rgw: true\n  helm_tests: true\n  network_policy: false\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "cert-rotation/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: \"1.0\"\ndescription: Rotate the certificates generated by cert-manager\nhome: https://cert-manager.io/\nname: cert-rotation\nversion: 2025.2.0\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "cert-rotation/templates/bin/_rotate-certs.sh.tpl",
    "content": "#!/bin/bash\n\nset -x\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n\nCOMMAND=\"${@:-rotate_job}\"\n\nnamespace={{ .Release.Namespace }}\nminDaysToExpiry={{ .Values.jobs.rotate.max_days_to_expiry }}\n\nrotateBefore=$(($(date +%s) + (86400*$minDaysToExpiry)))\n\nfunction rotate_and_get_certs_list(){\n    # Rotate the certificates if the expiry date of certificates is within the\n    # max_days_to_expiry days\n\n    # List of secret and certificates rotated\n    local -n secRotated=$1\n    deleteAllSecrets=$2\n    certRotated=()\n\n    for certificate in $(kubectl get certificates -n ${namespace} --no-headers | awk '{ print $1 }')\n    do\n        certInfo=($(kubectl get certificate -n ${namespace} ${certificate} -o json | jq -r '.spec[\"secretName\"],.status[\"notAfter\"]'))\n        secretName=${certInfo[0]}\n        notAfter=$(date -d\"${certInfo[1]}\" '+%s')\n        deleteSecret=false\n        if ${deleteAllSecrets} || [ ${rotateBefore} -gt ${notAfter} ]\n        then\n            # Rotate the certificates/secrets and add to list.\n            echo \"Deleting secret: ${secretName}\"\n            kubectl delete secret -n ${namespace} $secretName\n            secRotated+=(${secretName})\n            certRotated+=(${certificate})\n        fi\n    done\n\n    # Ensure certificates are re-issued\n    if [ ! 
-z ${certRotated} ]\n    then\n        for cert in ${certRotated[@]}\n        do\n            counter=0\n            retried=false\n            while [ \"$(kubectl get certificate -n ${namespace} ${cert} -o json | jq -r '.status.conditions[].status')\" != \"True\" ]\n            do\n                # Wait for secret to become ready. Wait for 300 seconds maximum. Sleep for 10 seconds\n                if [ ${counter} -ge 30 ]\n                then\n                    # Seems certificate is not in ready state yet, maybe there is an issue in renewing the certificate.\n                    # Try one more time before failing it. The name of the secret would be different at this time (when in\n                    # process of issuing)\n                    priSeckeyName=$(kubectl get certificate -n ${namespace} ${cert} -o json | jq -r '.status[\"nextPrivateKeySecretName\"]')\n\n                    if [ ${retried} = false ] && [ ! -z ${priSeckeyName} ]\n                    then\n                        echo \"Deleting interim failed secret ${priSeckeyName} in namespace ${namespace}\"\n                        kubectl delete secret -n ${namespace} ${priSeckeyName}\n                        retried=true\n                        counter=0\n                    else\n                        # Tried 2 times to renew the certificate, something is not right. Log error and\n                        # continue to check the status of next certificate. Once the status of all the\n                        # certificates has been checked, the pods need to be restarted so that the successfully\n                        # renewed certificates can be deployed.\n                        echo \"ERROR: Rotated certificate  ${cert} in ${namespace} is not ready.\"\n                        break\n                    fi\n                fi\n                echo \"Rotated certificate ${cert} in ${namespace} is not ready yet ... 
waiting\"\n                counter=$((counter+1))\n                sleep 10\n            done\n\n        done\n    fi\n}\n\nfunction get_cert_list_rotated_by_cert_manager_rotate(){\n\n    local -n secRotated=$1\n\n    # Get the time when the last cron job was run successfully\n    lastCronTime=$(kubectl get jobs -n ${namespace} --no-headers -l application=cert-manager,component=cert-rotate -o json | jq -r '.items[] | select(.status.succeeded != null) | .status.completionTime' | sort -r | head -n 1)\n\n    if [ ! -z ${lastCronTime} ]\n    then\n        lastCronTimeSec=$(date -d\"${lastCronTime}\" '+%s')\n\n        for certificate in $(kubectl get certificates -n ${namespace} --no-headers | awk '{ print $1 }')\n        do\n            certInfo=($(kubectl get certificate -n ${namespace} ${certificate} -o json | jq -r '.spec[\"secretName\"],.status[\"notBefore\"]'))\n            secretName=${certInfo[0]}\n            notBefore=$(date -d\"${certInfo[1]}\" '+%s')\n\n            # if the certificate was created after the last cronjob run, it means it was\n            # rotated by the cert-manager, so add it to the list.\n            if [[ ${notBefore} -gt ${lastCronTimeSec} ]]\n            then\n                secRotated+=(${secretName})\n            fi\n        done\n    fi\n}\n\nfunction restart_the_pods(){\n\n    local -n secRotated=$1\n\n    if [ -z ${secRotated} ]\n    then\n        echo \"All certificates are still valid in ${namespace} namespace. No pod needs restart\"\n        exit 0\n    fi\n\n    # Restart the pods using kubernetes rollout restart. This will restart the applications\n    # with zero downtime.\n    for kind in statefulset deployment daemonset\n    do\n        # Need to find which kinds mount the secret that has been rotated. 
To do this\n        # for a kind (statefulset, deployment, or daemonset)\n        # - get the name of the kind (which will index 1 = idx=0 of the output)\n        # - get the names of the secrets mounted on this kind (which will be index 2 = idx+1)\n        # - find if tls.crt was mounted to the container: get the subpaths of volumeMount in\n        #   the container and grep for tls.crt. (This will be index 3 = idx+2)\n        # - or, find if tls.crt was mounted to the initContainer (This will be index 4 = idx+3)\n\n        resource=($(kubectl get ${kind} -n ${namespace} -o custom-columns='NAME:.metadata.name,SECRETS:.spec.template.spec.volumes[*].secret.secretName,TLS-CONTAINER:.spec.template.spec.containers[*].volumeMounts[*].subPath,TLS-INIT:.spec.template.spec.initContainers[*].volumeMounts[*].subPath' --no-headers | grep tls.crt || true))\n\n        idx=0\n        while [[ $idx -lt ${#resource[@]} ]]\n        do\n            # Name of the kind\n            resourceName=${resource[$idx]}\n\n            # List of secrets mounted to this kind\n            resourceSecrets=${resource[$idx+1]}\n\n            # For each secret mounted to this kind, check if it was rotated (present in\n            # the list secRotated) and if it was, then trigger rolling restart for this kind.\n            for secret in ${resourceSecrets//,/ }\n            do\n                if [[ \"${secRotated[@]}\" =~ \"${secret}\" ]]\n                then\n                    echo \"Restarting ${kind} ${resourceName} in ${namespace} namespace.\"\n                    kubectl rollout restart -n ${namespace} ${kind} ${resourceName}\n                    break\n                fi\n            done\n\n            # Since we have 4 custom columns in the output, every 5th index will be start of new tuple.\n            # Jump to the next tuple.\n            idx=$((idx+4))\n        done\n    done\n}\n\nfunction rotate_cron(){\n    # Rotate cronjob invoked this script.\n    # 1. 
If the expiry date of certificates is within the max_days_to_expiry days\n    #    then rotate the certificates and restart the pods\n    # 2. Else if the certificates were rotated by cert-manager, then restart\n    #    the pods.\n\n    secretsRotated=()\n    deleteAllSecrets=false\n\n    rotate_and_get_certs_list secretsRotated $deleteAllSecrets\n\n    if [ ! -z ${secretsRotated} ]\n    then\n        # Certs rotated, restart pods\n        restart_the_pods secretsRotated\n    else\n        # Check if the certificates were rotated by the cert-manager and get the list of\n        # rotated certificates so that the corresponding pods can be restarted\n        get_cert_list_rotated_by_cert_manager_rotate secretsRotated\n        if [ ! -z ${secretsRotated} ]\n        then\n            restart_the_pods secretsRotated\n        else\n            echo \"All certificates are still valid in ${namespace} namespace\"\n        fi\n    fi\n}\n\nfunction rotate_job(){\n    # Rotate job invoked this script.\n    # 1. Rotate all certificates by deleting the secrets and restart the pods\n\n    secretsRotated=()\n    deleteAllSecrets=true\n\n    rotate_and_get_certs_list secretsRotated $deleteAllSecrets\n\n    if [ ! -z ${secretsRotated} ]\n    then\n        # Certs rotated, restart pods\n        restart_the_pods secretsRotated\n    else\n        echo \"All certificates are still valid in ${namespace} namespace\"\n    fi\n}\n\n$COMMAND\nexit 0\n"
  },
  {
    "path": "cert-rotation/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: cert-rotate-bin\ndata:\n  rotate-certs.sh: |\n{{ tuple \"bin/_rotate-certs.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{ end }}\n"
  },
  {
    "path": "cert-rotation/templates/cron-job-cert-rotate.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_job_cert_rotate}}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"cert-rotate-cron\" }}\n{{ tuple $envAll \"cert_rotate\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n    - cert-manager.io\n    resources:\n      - certificates\n    verbs:\n      - get\n      - list\n      - update\n      - patch\n  - apiGroups:\n    - \"*\"\n    resources:\n      - pods\n      - secrets\n      - jobs\n      - statefulsets\n      - daemonsets\n      - deployments\n    verbs:\n      - get\n      - list\n      - update\n      - patch\n      - delete\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: cert-rotate\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"cert-manager\" \"cert-rotate-cron\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  suspend: {{ 
.Values.jobs.rotate.suspend }}\n  schedule: {{ .Values.jobs.rotate.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.rotate.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.rotate.history.failed }}\n{{- if .Values.jobs.rotate.starting_deadline }}\n  startingDeadlineSeconds: {{ .Values.jobs.rotate.starting_deadline }}\n{{- end }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"cert-manager\" \"cert-rotate\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"cert-manager\" \"cert-rotate\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n          serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"cert_rotate\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n          restartPolicy: OnFailure\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n          initContainers:\n{{ tuple $envAll \"cert_rotate\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          containers:\n            - name: cert-rotate\n{{ tuple $envAll \"cert_rotation\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.cert_rotate | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"cert_rotate\" \"container\" \"cert_rotate\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              command:\n                - /tmp/rotate-certs.sh\n                - rotate_cron\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - name: cert-rotate-bin\n               
   mountPath: /tmp/rotate-certs.sh\n                  subPath: rotate-certs.sh\n                  readOnly: true\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: cert-rotate-bin\n              configMap:\n                name: cert-rotate-bin\n                defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "cert-rotation/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "cert-rotation/templates/job-cert-rotate.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_cert_rotate}}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"cert-rotate-job\" }}\n{{ tuple $envAll \"cert_rotate\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n    - cert-manager.io\n    resources:\n      - certificates\n    verbs:\n      - get\n      - list\n      - update\n      - patch\n  - apiGroups:\n    - \"*\"\n    resources:\n      - pods\n      - secrets\n      - jobs\n      - statefulsets\n      - daemonsets\n      - deployments\n    verbs:\n      - get\n      - list\n      - update\n      - patch\n      - delete\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: cert-rotate-job\n  labels:\n{{ tuple $envAll \"cert-manager\" \"cert-rotate-job\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"cert-manager\" \"cert-rotate\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"cert_rotate\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"cert_rotate\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n      containers:\n        - name: cert-rotate\n{{ tuple $envAll \"cert_rotation\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.cert_rotate | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cert_rotate\" \"container\" \"cert_rotate\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/rotate-certs.sh\n            - rotate_job\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: cert-rotate-bin\n              mountPath: /tmp/rotate-certs.sh\n              subPath: rotate-certs.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: cert-rotate-bin\n          configMap:\n            name: cert-rotate-bin\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "cert-rotation/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "cert-rotation/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n---\n\nimages:\n  tags:\n    cert_rotation: 'quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy'\n    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy'\n  local_registry:\n    active: false\nlabels:\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\njobs:\n  rotate:\n    # Run at 1:00AM on 1st of each month\n    cron: \"0 1 1 * *\"\n    starting_deadline: 600\n    history:\n      success: 3\n      failed: 1\n    # Number of day before expiry should certs be rotated.\n    max_days_to_expiry: 45\n    suspend: false\npod:\n  security_context:\n    cert_rotate:\n      pod:\n        runAsUser: 42424\n      container:\n        cert_rotate:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  resources:\n    enabled: false\n    jobs:\n      cert_rotate:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\ndependencies:\n  static:\n    cert_rotate: null\nsecrets:\n  oci_image_registry:\n    cert-rotation: cert-rotation-oci-image-registry-key\nendpoints:\n  cluster_domain_suffix: cluster.local\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      cert-rotation:\n        username: cert-rotation\n        password: password\n    hosts:\n      
default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\nmanifests:\n  configmap_bin: true\n  cron_job_cert_rotate: false\n  job_cert_rotate: false\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "cinder/.helmignore",
    "content": "values_overrides\n"
  },
  {
    "path": "cinder/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Cinder\nname: cinder\nversion: 2025.2.0\nhome: https://docs.openstack.org/cinder/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Cinder/OpenStack_Project_Cinder_vertical.png\nsources:\n  - https://opendev.org/openstack/cinder\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "cinder/templates/bin/_backup-storage-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\nif [[ $STORAGE_BACKEND =~ 'cinder.backup.drivers.ceph' ]]; then\n  SECRET=$(mktemp --suffix .yaml)\n  KEYRING=$(mktemp --suffix .keyring)\n  function cleanup {\n      rm -f ${SECRET} ${KEYRING}\n  }\n  trap cleanup EXIT\nfi\n\nset -ex\nif [[ $STORAGE_BACKEND =~ 'cinder.backup.drivers.swift' ]] || \\\n     [[ $STORAGE_BACKEND =~ 'cinder.backup.drivers.posix' ]]; then\n  echo \"INFO: no action required to use $STORAGE_BACKEND\"\nelif [[ $STORAGE_BACKEND =~ 'cinder.backup.drivers.ceph' ]]; then\n  ceph -s\n  function ensure_pool () {\n    ceph osd pool stats $1 || ceph osd pool create $1 $2\n    if [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. 
-f1) -ge 12 ]]; then\n        ceph osd pool application enable $1 $3\n    fi\n    size_protection=$(ceph osd pool get $1 nosizechange | cut -f2 -d: | tr -d '[:space:]')\n    ceph osd pool set $1 nosizechange 0\n    ceph osd pool set $1 size ${RBD_POOL_REPLICATION} --yes-i-really-mean-it\n    ceph osd pool set $1 nosizechange ${size_protection}\n    ceph osd pool set $1 crush_rule \"${RBD_POOL_CRUSH_RULE}\"\n  }\n  ensure_pool ${RBD_POOL_NAME} ${RBD_POOL_CHUNK_SIZE} ${RBD_POOL_APP_NAME}\n\n  if USERINFO=$(ceph auth get client.${RBD_POOL_USER}); then\n    echo \"Cephx user client.${RBD_POOL_USER} already exists\"\n    echo \"Update its cephx caps\"\n    ceph auth caps client.${RBD_POOL_USER} \\\n      mon \"profile rbd\" \\\n      osd \"profile rbd pool=${RBD_POOL_NAME}\"\n    ceph auth get client.${RBD_POOL_USER} -o ${KEYRING}\n  else\n    ceph auth get-or-create client.${RBD_POOL_USER} \\\n      mon \"profile rbd\" \\\n      osd \"profile rbd pool=${RBD_POOL_NAME}\" \\\n      -o ${KEYRING}\n  fi\n\n  ENCODED_KEYRING=$(sed -n 's/^[[:blank:]]*key[[:blank:]]\\+=[[:blank:]]\\(.*\\)/\\1/p' ${KEYRING} | base64 -w0)\n  cat > ${SECRET} <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: \"${RBD_POOL_SECRET}\"\ntype: kubernetes.io/rbd\ndata:\n  key: $( echo ${ENCODED_KEYRING} )\nEOF\n  kubectl apply --namespace ${NAMESPACE} -f ${SECRET}\n\nfi\n"
  },
  {
    "path": "cinder/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\n{{- if .Values.bootstrap.enabled | default \"echo 'Not Enabled'\" }}\n\n  {{- /* Create volume types defined in Values.bootstrap */}}\n  {{- /* Types can only be created for backends defined in Values.conf */}}\n  {{- $volumeTypes := .Values.bootstrap.volume_types }}\n  {{- /* Generating list of backends listed in .Values.conf.backends */}}\n  {{- $backendsList := list}}\n  {{- range $backend_name, $backend_properties := .Values.conf.backends }}\n    {{- if and $backend_properties $backend_properties.volume_backend_name }}\n    {{- $backendsList = append $backendsList $backend_properties.volume_backend_name }}\n    {{- end }}\n  {{- end }}\n\n  {{- range $name, $properties := $volumeTypes }}\n    {{- if and $properties.volume_backend_name (has $properties.volume_backend_name $backendsList) }}\n      {{- $access_type := $properties.access_type | default \"public\"}}\n      # Create a volume type if it doesn't exist.\n      # Assumption: the volume type name is unique.\n      openstack volume type show {{ $name }} || \\\n      openstack volume type create \\\n        --{{ $access_type }} \\\n      {{- range $key, $value := $properties }}\n      {{- if or (eq $key \"encryption-provider\") (eq $key \"encryption-cipher\") (eq $key \"encryption-key-size\") (eq $key \"encryption-control-location\") }}\n        --{{ $key }} {{ $value}} \\\n      {{- end }}\n      {{- end 
}}\n      {{ $name }}\n      {{/*\n        We will try to set or update volume type properties.\n        To update properties, the volume type MUST NOT BE IN USE,\n        and projects and domains with access to the volume type\n        MUST EXIST, as well.\n      */}}\n      is_in_use=$(openstack volume list --long --all-projects -c Type -f value | grep -E \"^{{ $name }}\\s*$\" || true)\n      if [[ -z ${is_in_use} ]]; then\n        {{- if (eq $access_type \"private\") }}\n        volumeTypeID=$(openstack volume type show {{ $name }} -f value -c id)\n        cinder type-update --is-public false ${volumeTypeID}\n        {{- end }}\n\n        {{- if and $properties.grant_access (eq $access_type \"private\") }}\n        {{- range $domain, $domainProjects := $properties.grant_access }}\n        {{- range $project := $domainProjects }}\n        project_id=$(openstack project show --domain {{ $domain }} -c id -f value {{ $project }})\n        if [[ -z  $(openstack volume type show {{ $name }} -c access_project_ids -f value | grep ${project_id} || true) ]]; then\n          openstack volume type set --project-domain {{ $domain }} --project {{ $project }} {{ $name }}\n        fi\n        {{- end }}\n        {{- end }}\n        {{- end }}\n\n        {{- range $key, $value := $properties }}\n        {{- if and (ne $key \"access_type\") (ne $key \"grant_access\") (ne $key \"encryption-provider\") (ne $key \"encryption-cipher\") (ne $key \"encryption-key-size\") (ne $key \"encryption-control-location\") $value }}\n        openstack volume type set --property {{ $key }}={{ $value }} {{ $name }}\n        {{- end }}\n        {{- end }}\n      fi\n    {{- end }}\n  {{- end }}\n\n  {{- /* Create volumes defined in Values.conf.backends */}}\n  {{- if .Values.bootstrap.bootstrap_conf_backends }}\n    {{- range $name, $properties := .Values.conf.backends }}\n      {{- if $properties }}\n        openstack volume type show {{ $name }} || \\\n        openstack volume type create \\\n     
   --public \\\n        --property volume_backend_name={{ $properties.volume_backend_name }} \\\n        {{ $name }}\n      {{- end }}\n    {{- end }}\n  {{- end }}\n\n  {{- /* Create and associate volume QoS if defined */}}\n  {{- if .Values.bootstrap.volume_qos}}\n    {{- range $qos_name, $qos_properties := .Values.bootstrap.volume_qos }}\n      type_defined=true\n      {{- /* If the volume type to associate with is not defined, skip the qos */}}\n      {{- range $qos_properties.associates }}\n        if ! openstack volume type show {{ . }}; then\n          type_defined=false\n        fi\n      {{- end }}\n      if [[ ${type_defined} ]]; then\n        openstack volume qos show {{ $qos_name }} || \\\n          openstack volume qos create \\\n          --consumer {{ $qos_properties.consumer }} \\\n          {{- range $key, $value := $qos_properties.properties }}\n            --property {{ $key }}={{ $value }} \\\n          {{- end }}\n          {{ $qos_name }}\n          {{- range $qos_properties.associates }}\n            openstack volume qos associate {{ $qos_name }} {{ . }}\n          {{- end }}\n      fi\n    {{- end }}\n  {{- end }}\n\n{{- /* Check volume type and properties were added */}}\nopenstack volume type list --long\nopenstack volume qos list\n{{- end }}\n\nexit 0\n"
  },
  {
    "path": "cinder/templates/bin/_ceph-admin-keyring.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncat <<EOF > /etc/ceph/ceph.client.admin.keyring\n[client.admin]\n{{- if .Values.conf.ceph.admin_keyring }}\n    key = {{ .Values.conf.ceph.admin_keyring }}\n{{- else }}\n    key = $(cat /tmp/client-keyring)\n{{- end }}\nEOF\n\nexit 0\n"
  },
  {
    "path": "cinder/templates/bin/_ceph-keyring.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncat <<EOF > /etc/ceph/ceph.client.${RBD_USER}.keyring\n[client.${RBD_USER}]\n    key = $(cat /tmp/client-keyring)\nEOF\n\n{{- if and .Values.ceph_client.enable_external_ceph_backend .Values.ceph_client.external_ceph.rbd_user }}\ncat <<EOF > /etc/ceph/ceph.client.${EXTERNAL_RBD_USER}.keyring\n[client.${EXTERNAL_RBD_USER}]\n    key = $(cat /tmp/external-ceph-client-keyring)\nEOF\n{{- end }}\n\nexit 0\n"
  },
  {
    "path": "cinder/templates/bin/_cinder-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n{{- if .Values.manifests.certificates }}\n  if [ -f /etc/apache2/envvars ]; then\n    # Loading Apache2 ENV variables\n    source /etc/apache2/envvars\n    mkdir -p ${APACHE_RUN_DIR}\n  fi\n\n{{- if .Values.conf.software.apache2.a2enmod }}\n  {{- range .Values.conf.software.apache2.a2enmod }}\n  a2enmod {{ . }}\n  {{- end }}\n{{- end }}\n\n{{- if .Values.conf.software.apache2.a2dismod }}\n  {{- range .Values.conf.software.apache2.a2dismod }}\n  a2dismod {{ . }}\n  {{- end }}\n{{- end }}\n\n  if [ -f /var/run/apache2/apache2.pid ]; then\n    # Remove the stale pid for debian/ubuntu images\n    rm -f /var/run/apache2/apache2.pid\n  fi\n  # Starts Apache2\n  exec {{ .Values.conf.software.apache2.binary }} {{ .Values.conf.software.apache2.start_parameters }}\n{{- else }}\n  exec uwsgi --ini /etc/cinder/cinder-api-uwsgi.ini\n{{- end }}\n}\n\nfunction stop () {\n{{- if .Values.manifests.certificates }}\n  if [ -f /etc/apache2/envvars ]; then\n    # Loading Apache2 ENV variables\n    source /etc/apache2/envvars\n    mkdir -p ${APACHE_RUN_DIR}\n  fi\n  {{ .Values.conf.software.apache2.binary }} -k graceful-stop\n{{- else }}\n  kill -TERM 1\n{{- end }}\n}\n\n$COMMAND\n"
  },
  {
    "path": "cinder/templates/bin/_cinder-backup.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexec cinder-backup \\\n     --config-file /etc/cinder/cinder.conf \\\n     --config-dir /etc/cinder/cinder.conf.d\n"
  },
  {
    "path": "cinder/templates/bin/_cinder-scheduler.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexec cinder-scheduler \\\n     --config-file /etc/cinder/cinder.conf \\\n     --config-dir /etc/cinder/cinder.conf.d\n"
  },
  {
    "path": "cinder/templates/bin/_cinder-volume.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexec cinder-volume \\\n     --config-file /etc/cinder/cinder.conf \\\n     --config-file /etc/cinder/conf/backends.conf \\\n     --config-file /tmp/pod-shared/internal_tenant.conf \\\n     --config-dir /etc/cinder/cinder.conf.d\n"
  },
  {
    "path": "cinder/templates/bin/_clean-secrets.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec kubectl delete secret \\\n  --namespace ${NAMESPACE} \\\n  --ignore-not-found=true \\\n  ${RBD_POOL_SECRET}\n"
  },
  {
    "path": "cinder/templates/bin/_create-internal-tenant-id.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n\nUSER_PROJECT_ID=$(openstack project create --or-show --enable -f value -c id \\\n    --domain=\"${PROJECT_DOMAIN_ID}\" \\\n    \"${INTERNAL_PROJECT_NAME}\");\n\nUSER_ID=$(openstack user create --or-show --enable -f value -c id \\\n    --domain=\"${USER_DOMAIN_ID}\" \\\n    --project-domain=\"${PROJECT_DOMAIN_ID}\" \\\n    --project=\"${USER_PROJECT_ID}\" \\\n    \"${INTERNAL_USER_NAME}\");\n\n"
  },
  {
    "path": "cinder/templates/bin/_db-purge.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec cinder-manage \\\n     --config-file /etc/cinder/cinder.conf \\\n     --config-dir /etc/cinder/cinder.conf.d \\\n     db purge {{ .Values.conf.db_purge.before }}\n"
  },
  {
    "path": "cinder/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec cinder-manage db sync\n"
  },
  {
    "path": "cinder/templates/bin/_external-ceph-rbd-admin-keyring.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{- if .Values.backup.external_ceph_rbd.admin_keyring }}\ncat <<EOF > /etc/ceph/ceph.client.admin.keyring\n[client.admin]\n    key = {{ .Values.backup.external_ceph_rbd.admin_keyring }}\nEOF\n{{- else }}\necho \"ERROR: You must define the ceph admin keyring in values.yaml to use external_ceph_rbd.\"\nexit 1\n{{- end }}\n\nexit 0\n"
  },
  {
    "path": "cinder/templates/bin/_iscsiadm.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2020 The Openstack-Helm Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nchroot /mnt/host-rootfs /usr/bin/env -i PATH=\"/sbin:/bin:/usr/bin\" \\\n       iscsiadm \"${@:1}\"\n"
  },
  {
    "path": "cinder/templates/bin/_multipath.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nchroot /mnt/host-rootfs /usr/bin/env -i PATH=\"/sbin:/bin:/usr/bin\" \\\n      multipath \"${@:1}\"\n"
  },
  {
    "path": "cinder/templates/bin/_multipathd.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nchroot /mnt/host-rootfs /usr/bin/env -i PATH=\"/sbin:/bin:/usr/bin\" \\\n      multipathd \"${@:1}\"\n"
  },
  {
    "path": "cinder/templates/bin/_retrieve-internal-tenant-id.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n\nUSER_PROJECT_ID=$(openstack project show -f value -c id \\\n    \"${INTERNAL_PROJECT_NAME}\");\n\nUSER_ID=$(openstack user show -f value -c id \\\n    \"${INTERNAL_USER_NAME}\");\n\ntee /tmp/pod-shared/internal_tenant.conf <<EOF\n[DEFAULT]\ncinder_internal_tenant_project_id = ${USER_PROJECT_ID}\ncinder_internal_tenant_user_id = ${USER_ID}\nEOF\n"
  },
  {
    "path": "cinder/templates/bin/_storage-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\nif [ \"x$STORAGE_BACKEND\" == \"xcinder.volume.drivers.rbd.RBDDriver\" ]; then\n  SECRET=$(mktemp --suffix .yaml)\n  KEYRING=$(mktemp --suffix .keyring)\n  function cleanup {\n      rm -f ${SECRET} ${KEYRING}\n  }\n  trap cleanup EXIT\nfi\n\nset -ex\nif [ \"x$STORAGE_BACKEND\" == \"xcinder.volume.drivers.rbd.RBDDriver\" ]; then\n  ceph -s\n  function ensure_pool () {\n    ceph osd pool stats $1 || ceph osd pool create $1 $2\n    if [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then\n        ceph osd pool application enable $1 $3\n    fi\n    size_protection=$(ceph osd pool get $1 nosizechange | cut -f2 -d: | tr -d '[:space:]')\n    ceph osd pool set $1 nosizechange 0\n    ceph osd pool set $1 size ${RBD_POOL_REPLICATION} --yes-i-really-mean-it\n    ceph osd pool set $1 nosizechange ${size_protection}\n    ceph osd pool set $1 crush_rule \"${RBD_POOL_CRUSH_RULE}\"\n  }\n  ensure_pool ${RBD_POOL_NAME} ${RBD_POOL_CHUNK_SIZE} ${RBD_POOL_APP_NAME}\n\n  if USERINFO=$(ceph auth get client.${RBD_POOL_USER}); then\n    echo \"Cephx user client.${RBD_POOL_USER} already exist.\"\n    echo \"Update its cephx caps\"\n    ceph auth caps client.${RBD_POOL_USER} \\\n      mon \"profile rbd\" \\\n      osd \"profile rbd\"\n    ceph auth get client.${RBD_POOL_USER} -o ${KEYRING}\n  else\n    #NOTE(JCL): Restrict Cinder permissions to what is needed. 
MON Read only and RBD access to Cinder pool only.\n    ceph auth get-or-create client.${RBD_POOL_USER} \\\n      mon \"profile rbd\" \\\n      osd \"profile rbd\" \\\n      -o ${KEYRING}\n  fi\n\n  ENCODED_KEYRING=$(sed -n 's/^[[:blank:]]*key[[:blank:]]\\+=[[:blank:]]\\(.*\\)/\\1/p' ${KEYRING} | base64 -w0)\n  cat > ${SECRET} <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: \"${RBD_POOL_SECRET}\"\ntype: kubernetes.io/rbd\ndata:\n  key: $( echo ${ENCODED_KEYRING} )\nEOF\n  kubectl apply --namespace ${NAMESPACE} -f ${SECRET}\n\nfi\n"
  },
  {
    "path": "cinder/templates/bin/_volume-usage-audit.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec cinder-volume-usage-audit \\\n     --config-file /etc/cinder/cinder.conf \\\n     --config-dir /etc/cinder/cinder.conf.d \\\n     --send_actions\n"
  },
  {
    "path": "cinder/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.certificates -}}\n{{  dict \"envAll\" . \"service\" \"volumev3\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "cinder/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $rallyTests := .Values.conf.rally_tests }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: cinder-bin\ndata:\n{{- if .Values.conf.enable_iscsi }}\n  iscsiadm: |\n{{ tuple \"bin/_iscsiadm.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  multipath: |\n{{ tuple \"bin/_multipath.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  multipathd: |\n{{ tuple \"bin/_multipathd.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  rally-test.sh: |\n{{ tuple $rallyTests | include \"helm-toolkit.scripts.rally_test\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  db-purge.sh: |\n{{ tuple \"bin/_db-purge.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . 
| indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  create-internal-tenant.sh: |\n{{ tuple \"bin/_create-internal-tenant-id.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  retrieve-internal-tenant.sh: |\n{{ tuple \"bin/_retrieve-internal-tenant-id.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  cinder-api.sh: |\n{{ tuple \"bin/_cinder-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  cinder-backup.sh: |\n{{ tuple \"bin/_cinder-backup.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  cinder-scheduler.sh: |\n{{ tuple \"bin/_cinder-scheduler.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  cinder-volume.sh: |\n{{ tuple \"bin/_cinder-volume.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ceph-keyring.sh: |\n{{ tuple \"bin/_ceph-keyring.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ceph-admin-keyring.sh: |\n{{ tuple \"bin/_ceph-admin-keyring.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  volume-usage-audit.sh: |\n{{ tuple \"bin/_volume-usage-audit.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  backup-storage-init.sh: |\n{{ tuple \"bin/_backup-storage-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  storage-init.sh: |\n{{ tuple \"bin/_storage-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  clean-secrets.sh: |\n{{ tuple \"bin/_clean-secrets.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . 
| indent 4 }}\n{{- if .Values.backup.external_ceph_rbd.enabled }}\n  external-ceph-rbd-admin-keyring.sh: |\n{{ tuple \"bin/_external-ceph-rbd-admin-keyring.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.cinder.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.cinder.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cinder.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.cinder.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cinder.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.cinder.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.cinder.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.cinder.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if empty $envAll.Values.conf.cinder.nova.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set $envAll.Values.conf.cinder.nova \"auth_url\" -}}\n{{- end }}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.cinder.database.connection)) (empty .Values.conf.cinder.database.connection) -}}\n{{- $connection := tuple \"oslo_db\" \"internal\" \"cinder\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.cinder.database \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.cinder.database \"connection\" $connection -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cinder.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"cinder\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.cinder.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cinder.DEFAULT.glance_api_servers -}}\n{{- $_ := tuple \"image\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.cinder.DEFAULT \"glance_api_servers\" -}}\n{{- end -}}\n\n{{- if (contains \"cinder.backup.drivers.swift\" .Values.conf.cinder.DEFAULT.backup_driver) }}\n{{- if empty .Values.conf.cinder.DEFAULT.backup_swift_auth_version -}}\n{{- $_ := set .Values.conf.cinder.DEFAULT \"backup_swift_auth_version\" \"3\" -}}\n{{- end -}}\n{{- if empty .Values.conf.cinder.DEFAULT.backup_swift_auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.cinder.DEFAULT \"backup_swift_auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.cinder.DEFAULT.backup_swift_user_domain -}}\n{{- $_ := set .Values.conf.cinder.DEFAULT \"backup_swift_user_domain\" .Values.endpoints.identity.auth.swift.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cinder.DEFAULT.backup_swift_user -}}\n{{- $_ := set .Values.conf.cinder.DEFAULT \"backup_swift_user\" .Values.endpoints.identity.auth.swift.username -}}\n{{- end -}}\n{{- if empty .Values.conf.cinder.DEFAULT.backup_swift_key -}}\n{{- $_ := set .Values.conf.cinder.DEFAULT \"backup_swift_key\" .Values.endpoints.identity.auth.swift.password -}}\n{{- end -}}\n{{- if empty .Values.conf.cinder.DEFAULT.backup_swift_project_domain -}}\n{{- $_ := set .Values.conf.cinder.DEFAULT \"backup_swift_project_domain\" .Values.endpoints.identity.auth.swift.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cinder.DEFAULT.backup_swift_project -}}\n{{- $_ := set .Values.conf.cinder.DEFAULT \"backup_swift_project\" .Values.endpoints.identity.auth.swift.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cinder.DEFAULT.swift_catalog_info -}}\n{{- $_ := set .Values.conf.cinder.DEFAULT \"swift_catalog_info\" \"object-store:swift:internalURL\" -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cinder.DEFAULT.osapi_volume_listen_port -}}\n{{- $_ := tuple \"volumev3\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.cinder.DEFAULT \"osapi_volume_listen_port\" -}}\n{{- end -}}\n\n{{- if .Values.conf.cinder.service_user.send_service_user_token -}}\n{{- if empty .Values.conf.cinder.service_user.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.cinder.service_user \"auth_url\" -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cinder_api_uwsgi.uwsgi.processes -}}\n{{- $_ := set .Values.conf.cinder_api_uwsgi.uwsgi \"processes\" .Values.conf.cinder.DEFAULT.osapi_volume_workers -}}\n{{- end -}}\n{{- if empty (index .Values.conf.cinder_api_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"volumev3\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.cinder_api_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .Release.Name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: cinder-etc\ntype: Opaque\ndata:\n  rally_tests.yaml: {{ toYaml .Values.conf.rally_tests.tests | b64enc }}\n  cinder.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.cinder | b64enc }}\n  
logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  backends.conf: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.backends | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.paste | b64enc }}\n  cinder-api-uwsgi.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.cinder_api_uwsgi | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n{{- if .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.mpm_event \"key\" \"mpm_event.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.wsgi_cinder \"key\" \"wsgi-cinder.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end }}\n  api_audit_map.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.api_audit_map | b64enc }}\n  cinder_sudoers: {{ $envAll.Values.conf.cinder_sudoers | b64enc }}\n  rootwrap.conf: {{ $envAll.Values.conf.rootwrap | b64enc }}\n  resource_filters.json: {{ toJson .Values.conf.resource_filters | b64enc }}\n{{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n{{- $filePrefix := replace \"_\" \"-\"  $key }}\n  {{ printf \"%s.filters\" $filePrefix }}: {{ $value.content | b64enc }}\n{{- end }}\n{{- if and .Values.backup.external_ceph_rbd.enabled (not .Values.backup.external_ceph_rbd.configmap) }}\n  external-backup-ceph.conf: {{ include \"helm-toolkit.utils.to_ini\" .Values.backup.external_ceph_rbd.conf | b64enc }}\n{{- end }}\n{{- if and .Values.ceph_client.enable_external_ceph_backend (not .Values.ceph_client.external_ceph.configmap) }}\n  external-ceph.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.ceph_client.external_ceph.conf | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/cron-job-cinder-db-purge.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_db_purge }}\n{{- $envAll := . }}\n\n{{- $mounts_cinder_db_purge := .Values.pod.mounts.cinder_db_purge.cinder_db_purge }}\n{{- $mounts_cinder_db_purge_init := .Values.pod.mounts.cinder_db_purge.init_container }}\n{{- $etcSources := .Values.pod.etcSources.cinder_db_purge }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"cinder-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"cinder-db-purge\" }}\n{{ tuple $envAll \"db_purge\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: cinder-db-purge\n  labels:\n{{ tuple $envAll \"cinder\" \"cinder-db-purge\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  schedule: {{ .Values.jobs.db_purge.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.db_purge.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.db_purge.history.failed }}\n  {{- if .Values.jobs.db_purge.starting_deadline }}\n  startingDeadlineSeconds: {{ .Values.jobs.db_purge.starting_deadline }}\n  {{- end }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"cinder\" \"db-purge\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"cinder\" \"db-purge\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n          annotations:\n{{ dict \"envAll\" $envAll \"podName\" $serviceAccountName \"containerNames\" (list \"db-purge\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 12 }}\n        spec:\n{{ tuple \"cinder_db_purge\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 10 }}\n{{ tuple \"cinder_db_purge\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"db_purge\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n          serviceAccountName: {{ $serviceAccountName }}\n          restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.cinder.enabled }}\n{{ tuple $envAll \"cinder\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n          initContainers:\n{{ tuple $envAll \"db_purge\" $mounts_cinder_db_purge_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          containers:\n            - name: db-purge\n{{ tuple $envAll \"cinder_db_purge\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_purge | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"db_purge\" \"container\" \"cinder_db_purge\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n              env:\n                - name: 
REQUESTS_CA_BUNDLE\n                  value: \"/etc/cinder/certs/ca.crt\"\n{{- end }}\n              command:\n                - /tmp/db-purge.sh\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - name: etccinder\n                  mountPath: /etc/cinder\n                - name: cinder-etc\n                  mountPath: /etc/cinder/cinder.conf\n                  subPath: cinder.conf\n                  readOnly: true\n                - name: cinder-etc-snippets\n                  mountPath: /etc/cinder/cinder.conf.d/\n                  readOnly: true\n                {{- if .Values.conf.cinder.DEFAULT.log_config_append }}\n                - name: cinder-etc\n                  mountPath: {{ .Values.conf.cinder.DEFAULT.log_config_append }}\n                  subPath: {{ base .Values.conf.cinder.DEFAULT.log_config_append }}\n                  readOnly: true\n                {{- end }}\n                - name: cinder-bin\n                  mountPath: /tmp/db-purge.sh\n                  subPath: db-purge.sh\n                  readOnly: true\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.volumev3.api.internal \"path\" \"/etc/cinder/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n{{ if $mounts_cinder_db_purge.volumeMounts }}{{ toYaml $mounts_cinder_db_purge.volumeMounts | indent 16 }}{{ end }}\n          volumes:\n            - name: pod-tmp\n              
emptyDir: {}\n            - name: etccinder\n              emptyDir: {}\n            - name: cinder-etc\n              secret:\n                secretName: cinder-etc\n                defaultMode: 0444\n            - name: cinder-etc-snippets\n{{- if $etcSources }}\n              projected:\n                sources:\n{{ toYaml $etcSources | indent 18 }}\n{{- else }}\n              emptyDir: {}\n{{ end }}\n            - name: cinder-bin\n              configMap:\n                name: cinder-bin\n                defaultMode: 0555\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.volumev3.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{ if $mounts_cinder_db_purge.volumes }}{{ toYaml $mounts_cinder_db_purge.volumes | indent 12 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/cron-job-cinder-volume-usage-audit.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_volume_usage_audit }}\n{{- $envAll := . }}\n\n{{- $mounts_cinder_volume_usage_audit := .Values.pod.mounts.cinder_volume_usage_audit.cinder_volume_usage_audit }}\n{{- $mounts_cinder_volume_usage_audit_init := .Values.pod.mounts.cinder_volume_usage_audit.init_container }}\n{{- $etcSources := .Values.pod.etcSources.cinder_volume_usage_audit }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"cinder-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"cinder-volume-usage-audit\" }}\n{{ tuple $envAll \"volume_usage_audit\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: cinder-volume-usage-audit\n  labels:\n{{ tuple $envAll \"cinder\" \"volume-usage-audit\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  schedule: {{ .Values.jobs.volume_usage_audit.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.volume_usage_audit.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.volume_usage_audit.history.failed }}\n  {{- if .Values.jobs.volume_usage_audit.starting_deadline }}\n  startingDeadlineSeconds: {{ .Values.jobs.volume_usage_audit.starting_deadline }}\n  
{{- end }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"cinder\" \"volume-usage-audit\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"cinder\" \"volume-usage-audit\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n          annotations:\n{{ dict \"envAll\" $envAll \"podName\" $serviceAccountName \"containerNames\" (list \"cinder-volume-usage-audit\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 12 }}\n        spec:\n{{ dict \"envAll\" $envAll \"application\" \"volume_usage_audit\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n{{ tuple \"cinder_volume_usage_audit\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 10 }}\n{{ tuple \"cinder_volume_usage_audit\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 10 }}\n          serviceAccountName: {{ $serviceAccountName }}\n          restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.cinder.enabled }}\n{{ tuple $envAll \"cinder\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n          initContainers:\n{{ tuple $envAll \"volume_usage_audit\" $mounts_cinder_volume_usage_audit_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          containers:\n            - name: cinder-volume-usage-audit\n{{ tuple $envAll \"cinder_volume_usage_audit\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.volume_usage_audit | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll 
\"application\" \"volume_usage_audit\" \"container\" \"cinder_volume_usage_audit\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n              env:\n                - name: REQUESTS_CA_BUNDLE\n                  value: \"/etc/cinder/certs/ca.crt\"\n{{- end }}\n              command:\n                - /tmp/volume-usage-audit.sh\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - name: etccinder\n                  mountPath: /etc/cinder\n                - name: cinder-etc\n                  mountPath: /etc/cinder/cinder.conf\n                  subPath: cinder.conf\n                  readOnly: true\n                - name: cinder-etc-snippets\n                  mountPath: /etc/cinder/cinder.conf.d/\n                  readOnly: true\n                {{- if .Values.conf.cinder.DEFAULT.log_config_append }}\n                - name: cinder-etc\n                  mountPath: {{ .Values.conf.cinder.DEFAULT.log_config_append }}\n                  subPath: {{ base .Values.conf.cinder.DEFAULT.log_config_append }}\n                  readOnly: true\n                {{- end }}\n                - name: cinder-bin\n                  mountPath: /tmp/volume-usage-audit.sh\n                  subPath: volume-usage-audit.sh\n                  readOnly: true\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.volumev3.api.internal \"path\" \"/etc/cinder/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" 
$envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n{{ if $mounts_cinder_volume_usage_audit.volumeMounts }}{{ toYaml $mounts_cinder_volume_usage_audit.volumeMounts | indent 16 }}{{ end }}\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: etccinder\n              emptyDir: {}\n            - name: cinder-etc\n              secret:\n                secretName: cinder-etc\n                defaultMode: 0444\n            - name: cinder-etc-snippets\n{{- if $etcSources }}\n              projected:\n                sources:\n{{ toYaml $etcSources | indent 18 }}\n{{- else }}\n              emptyDir: {}\n{{ end }}\n            - name: cinder-bin\n              configMap:\n                name: cinder-bin\n                defaultMode: 0555\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.volumev3.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{ if $mounts_cinder_volume_usage_audit.volumes }}{{ toYaml $mounts_cinder_volume_usage_audit.volumes | indent 12 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"cinderApiLivenessProbeTemplate\" }}\nhttpGet:\n  scheme: {{ tuple \"volumev3\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: {{ tuple \"volumev3\" \"healthcheck\" \"internal\" . | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" }}\n  port: {{ tuple \"volumev3\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- define \"cinderApiReadinessProbeTemplate\" }}\nhttpGet:\n  scheme: {{ tuple \"volumev3\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: {{ tuple \"volumev3\" \"healthcheck\" \"internal\" . | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" }}\n  port: {{ tuple \"volumev3\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . 
}}\n\n{{- $mounts_cinder_api := .Values.pod.mounts.cinder_api.cinder_api }}\n{{- $mounts_cinder_api_init := .Values.pod.mounts.cinder_api.init_container }}\n{{- $etcSources := .Values.pod.etcSources.cinder_api }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"cinder-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"cinder-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: cinder-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"cinder\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"cinder\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"cinder\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"cinder_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"cinder-api\" \"containerNames\" (list \"cinder-api\" \"ceph-coordination-volume-perms\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"cinder_api\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"cinder_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder_api\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"cinder\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ if $envAll.Values.pod.tolerations.cinder.enabled }}\n{{ tuple $envAll \"cinder\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_cinder_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        {{- if eq ( split \"://\" .Values.conf.cinder.coordination.backend_url )._0 \"file\" }}\n        - name: ceph-coordination-volume-perms\n{{ tuple $envAll \"cinder_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder_api\" \"container\" \"ceph_coordination_volume_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - \"cinder:\"\n            - {{ ( split \"://\" .Values.conf.cinder.coordination.backend_url )._1 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: cinder-coordination\n              mountPath: {{ ( split \"://\" .Values.conf.cinder.coordination.backend_url )._1 }}\n        {{ end }}\n      containers:\n        - name: cinder-api\n{{ tuple $envAll 
\"cinder_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder_api\" \"container\" \"cinder_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/cinder-api.sh\n            - start\n          env:\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/cinder/certs/ca.crt\"\n{{- end }}\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/cinder-api.sh\n                  - stop\n          ports:\n            - name: c-api\n              containerPort: {{ tuple \"volumev3\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"cinder-api\" \"type\" \"readiness\" \"probeTemplate\" (include \"cinderApiReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"cinder-api\" \"type\" \"liveness\" \"probeTemplate\" (include \"cinderApiLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.cinder.oslo_concurrency.lock_path }}\n            - name: cinder-bin\n              mountPath: /tmp/cinder-api.sh\n              subPath: cinder-api.sh\n              readOnly: true\n            - name: cinder-etc\n              mountPath: /etc/cinder/cinder-api-uwsgi.ini\n              subPath: cinder-api-uwsgi.ini\n              readOnly: true\n            - 
name: cinder-etc\n              mountPath: /etc/cinder/cinder.conf\n              subPath: cinder.conf\n              readOnly: true\n            - name: cinder-etc-snippets\n              mountPath: /etc/cinder/cinder.conf.d/\n              readOnly: true\n            {{- if .Values.conf.cinder.DEFAULT.log_config_append }}\n            - name: cinder-etc\n              mountPath: {{ .Values.conf.cinder.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.cinder.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: cinder-etc\n              mountPath: /etc/cinder/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: cinder-etc\n              mountPath: /etc/cinder/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: cinder-etc\n              mountPath: /etc/cinder/api_audit_map.conf\n              subPath: api_audit_map.conf\n              readOnly: true\n            - name: cinder-etc\n              mountPath: {{ .Values.conf.cinder.DEFAULT.resource_query_filters_file }}\n              subPath: resource_filters.json\n              readOnly: true\n{{- if .Values.conf.security }}\n            - name: cinder-etc\n              mountPath: {{ .Values.conf.software.apache2.conf_dir }}/security.conf\n              subPath: security.conf\n              readOnly: true\n{{- end }}\n            {{- if eq ( split \"://\" .Values.conf.cinder.coordination.backend_url )._0 \"file\" }}\n            - name: cinder-coordination\n              mountPath: {{ ( split \"://\" .Values.conf.cinder.coordination.backend_url )._1 }}\n            {{- end }}\n            {{- if .Values.manifests.certificates }}\n            - name: cinder-etc\n              mountPath: {{ .Values.conf.software.apache2.site_dir }}/cinder-api.conf\n              subPath: wsgi-cinder.conf\n              readOnly: true\n            - name: 
cinder-etc\n              mountPath: {{ .Values.conf.software.apache2.mods_dir }}/mpm_event.conf\n              subPath: mpm_event.conf\n              readOnly: true\n            {{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.volumev3.api.internal \"path\" \"/etc/cinder/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_cinder_api.volumeMounts }}{{ toYaml $mounts_cinder_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: cinder-bin\n          configMap:\n            name: cinder-bin\n            defaultMode: 0555\n        - name: cinder-etc\n          secret:\n            secretName: cinder-etc\n            defaultMode: 0444\n        - name: cinder-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        {{- if eq ( split \"://\" .Values.conf.cinder.coordination.backend_url )._0 \"file\" }}\n        # NOTE (portdirect): this will need to be set to a shared mount amongst all cinder\n        # pods for the coordination backend to be fully functional.\n        - name: cinder-coordination\n          emptyDir: {}\n        {{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include 
\"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.volumev3.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_cinder_api.volumes }}{{ toYaml $mounts_cinder_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/deployment-backup.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_backup }}\n{{- $envAll := . }}\n\n{{- $internal_ceph_backend := .Values.ceph_client.internal_ceph_backend }}\n\n{{- $mounts_cinder_backup := .Values.pod.mounts.cinder_backup.cinder_backup }}\n{{- $mounts_cinder_backup_init := .Values.pod.mounts.cinder_backup.init_container }}\n{{- $etcSources := .Values.pod.etcSources.cinder_backup }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"cinder-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"cinder-backup\" }}\n{{ tuple $envAll \"backup\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: cinder-backup\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"cinder\" \"backup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.backup }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"cinder\" \"backup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"cinder\" \"backup\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"cinder_backup\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"cinder-backup\" \"containerNames\" (list \"cinder-backup\" \"ceph-coordination-volume-perms\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"cinder_backup\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"cinder_backup\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder_backup\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"cinder\" \"backup\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ if $envAll.Values.pod.tolerations.cinder.enabled }}\n{{ tuple $envAll \"cinder\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.backup.node_selector_key }}: {{ .Values.labels.backup.node_selector_value }}\n{{- if .Values.pod.useHostNetwork.backup }}\n      hostNetwork: true\n      dnsPolicy: ClusterFirstWithHostNet\n{{- end }}\n{{- if .Values.conf.enable_iscsi }}\n      hostIPC: true\n{{- end }}\n      initContainers:\n{{ tuple $envAll \"backup\" $mounts_cinder_backup_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        {{- if (contains \"cinder.backup.drivers.ceph\" 
.Values.conf.cinder.DEFAULT.backup_driver) }}\n        - name: ceph-backup-keyring-placement\n{{ tuple $envAll \"cinder_backup\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder_backup\" \"container\" \"ceph_backup_keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/ceph-keyring.sh\n          env:\n            - name: RBD_USER\n              value: {{ .Values.conf.cinder.DEFAULT.backup_ceph_user | quote }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: cinder-bin\n              mountPath: /tmp/ceph-keyring.sh\n              subPath: ceph-keyring.sh\n              readOnly: true\n            - name: ceph-backup-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n        {{ end }}\n        {{- range $name := rest (splitList \",\" (include \"cinder.utils.ceph_backend_list\" $envAll)) }}\n          {{- $backend := index $envAll.Values.conf.backends $name }}\n            {{- if eq $internal_ceph_backend $name }}\n        - name: ceph-keyring-placement-{{ $name | lower }}\n{{ tuple $envAll \"cinder_backup\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder_backup\" \"container\" \"ceph_keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/ceph-keyring.sh\n          env:\n            - name: RBD_USER\n              value: {{ $backend.rbd_user | quote }}\n{{- if and $envAll.Values.ceph_client.enable_external_ceph_backend $envAll.Values.ceph_client.external_ceph.rbd_user }}\n            - name: EXTERNAL_RBD_USER\n              value: {{ $envAll.Values.ceph_client.external_ceph.rbd_user 
| quote }}\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: cinder-bin\n              mountPath: /tmp/ceph-keyring.sh\n              subPath: ceph-keyring.sh\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n           {{- if and $envAll.Values.ceph_client.enable_external_ceph_backend $envAll.Values.ceph_client.external_ceph.rbd_user }}\n            - name: external-ceph-keyring\n              mountPath: /tmp/external-ceph-client-keyring\n              subPath: key\n              readOnly: true\n            {{- end }}\n            {{- end }}\n        {{- end }}\n        {{- if (contains \"cinder.backup.drivers.posix\" .Values.conf.cinder.DEFAULT.backup_driver) }}\n        - name: ceph-backup-volume-perms\n{{ tuple $envAll \"cinder_backup\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder_backup\" \"container\" \"ceph_backup_volume_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - \"cinder:\"\n            - {{ .Values.conf.cinder.DEFAULT.backup_posix_path }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: cinder-backup\n              mountPath: {{ .Values.conf.cinder.DEFAULT.backup_posix_path }}\n        {{ end }}\n        {{- if eq ( split \"://\" .Values.conf.cinder.coordination.backend_url )._0 \"file\" }}\n        - name: ceph-coordination-volume-perms\n{{ tuple $envAll \"cinder_backup\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder_backup\" \"container\" \"ceph_coordination_volume_perms\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - \"cinder:\"\n            - {{ ( split \"://\" .Values.conf.cinder.coordination.backend_url )._1 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: cinder-coordination\n              mountPath: {{ ( split \"://\" .Values.conf.cinder.coordination.backend_url )._1 }}\n        {{ end }}\n      containers:\n        - name: cinder-backup\n{{ tuple $envAll \"cinder_backup\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.backup | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder_backup\" \"container\" \"cinder_backup\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/cinder-backup.sh\n          terminationMessagePath: /var/log/termination-log\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.cinder.oslo_concurrency.lock_path }}\n            - name: cinder-tmp\n              mountPath: /var/lib/cinder/tmp\n            - name: cinder-bin\n              mountPath: /tmp/cinder-backup.sh\n              subPath: cinder-backup.sh\n              readOnly: true\n            - name: cinder-etc\n              mountPath: /etc/cinder/cinder.conf\n              subPath: cinder.conf\n              readOnly: true\n            - name: cinder-etc-snippets\n              mountPath: /etc/cinder/cinder.conf.d/\n              readOnly: true\n            {{- if .Values.conf.cinder.DEFAULT.log_config_append }}\n            - name: cinder-etc\n              mountPath: {{ .Values.conf.cinder.DEFAULT.log_config_append }}\n              subPath: {{ base 
.Values.conf.cinder.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            {{ if or (contains \"cinder.backup.drivers.ceph\" .Values.conf.cinder.DEFAULT.backup_driver) (eq \"true\" (include \"cinder.utils.has_ceph_backend\" $envAll)) }}\n            - name: etcceph\n              mountPath: /etc/ceph\n            {{- if not .Values.backup.external_ceph_rbd.enabled }}\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            {{- else if .Values.backup.external_ceph_rbd.configmap }}\n            - name: external-backup-ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            {{- else }}\n            - name: cinder-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: external-backup-ceph.conf\n              readOnly: true\n            {{- end }}\n            {{- if (contains \"cinder.backup.drivers.ceph\" .Values.conf.cinder.DEFAULT.backup_driver) }}\n            - name: ceph-backup-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{- else }}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{- end }}\n            {{- if .Values.ceph_client.enable_external_ceph_backend }}\n            {{- if .Values.ceph_client.external_ceph.configmap }}\n            - name: external-ceph-etc\n              mountPath: /etc/ceph/external-ceph.conf\n              subPath: external-ceph.conf\n              readOnly: true\n            {{- else }}\n            - name: cinder-etc\n              mountPath: /etc/ceph/external-ceph.conf\n              subPath: external-ceph.conf\n              readOnly: true\n            {{- end }}\n            {{- if 
.Values.ceph_client.external_ceph.rbd_user }}\n            - name: external-ceph-keyring\n              mountPath: /tmp/external-ceph-client-keyring\n              subPath: key\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            {{- end }}\n            {{- if (contains \"cinder.backup.drivers.posix\" .Values.conf.cinder.DEFAULT.backup_driver) }}\n            - name: cinder-backup\n              mountPath: {{ .Values.conf.cinder.DEFAULT.backup_posix_path }}\n            {{- end }}\n            {{- if eq ( split \"://\" .Values.conf.cinder.coordination.backend_url )._0 \"file\" }}\n            - name: cinder-coordination\n              mountPath: {{ ( split \"://\" .Values.conf.cinder.coordination.backend_url )._1 }}\n            {{- end }}\n            - name: cinder-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_cinder_sudoers\n              subPath: cinder_sudoers\n              readOnly: true\n            - name: cinder-etc\n              mountPath: /etc/sudoers.d/kolla_cinder_volume_sudoers\n              subPath: cinder_sudoers\n              readOnly: true\n            - name: cinder-etc\n              mountPath: /etc/cinder/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            - name: cinder-etc\n              mountPath: /etc/cinder/rootwrap.d/volume.filters\n              subPath: volume.filters\n              readOnly: true\n            {{- if .Values.conf.enable_iscsi }}\n            - name: host-rootfs\n              mountPath: /mnt/host-rootfs\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: HostToContainer\n              {{- end }}\n            - name: host-dev\n            
  mountPath: /dev\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: HostToContainer\n              {{- end }}\n            - name: runcryptsetup\n              mountPath: /run/cryptsetup\n            - name: runlock\n              mountPath: /run/lock\n            - name: etciscsi\n              mountPath: /etc/iscsi\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: HostToContainer\n              {{- end }}\n            - name: usrlocalsbin\n              mountPath: /usr/local/sbin\n            - name: cinder-bin\n              mountPath: /usr/local/sbin/iscsiadm\n              subPath: iscsiadm\n            {{- end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_cinder_backup.volumeMounts }}{{ toYaml $mounts_cinder_backup.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: cinder-tmp\n          emptyDir: {}\n        - name: cinder-etc\n          secret:\n            secretName: cinder-etc\n            defaultMode: 0444\n        - name: cinder-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: cinder-bin\n          configMap:\n            name: cinder-bin\n            defaultMode: 
0555\n        {{ if or (contains \"cinder.backup.drivers.ceph\" .Values.conf.cinder.DEFAULT.backup_driver) (eq \"true\" (include \"cinder.utils.has_ceph_backend\" $envAll)) }}\n        - name: etcceph\n          emptyDir: {}\n        {{- if and .Values.backup.external_ceph_rbd.enabled .Values.backup.external_ceph_rbd.configmap }}\n        - name: external-backup-ceph-etc\n          configMap:\n            name: {{ .Values.backup.external_ceph_rbd.configmap }}\n            defaultMode: 0444\n        {{- end }}\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n            defaultMode: 0444\n        {{- if and .Values.ceph_client.enable_external_ceph_backend .Values.ceph_client.external_ceph.configmap }}\n        - name: external-ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.external_ceph.configmap }}\n            defaultMode: 0444\n        {{- end }}\n        {{ end }}\n        {{- if (contains \"cinder.backup.drivers.ceph\" .Values.conf.cinder.DEFAULT.backup_driver) }}\n        - name: ceph-backup-keyring\n          secret:\n            secretName: {{ .Values.secrets.rbd.backup | quote }}\n        {{ end }}\n        {{- if eq \"true\" (include \"cinder.utils.has_ceph_backend\" $envAll) }}\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.secrets.rbd.volume | quote }}\n        {{- if and .Values.ceph_client.enable_external_ceph_backend .Values.ceph_client.external_ceph.rbd_user }}\n        - name: external-ceph-keyring\n          secret:\n            secretName: {{ .Values.secrets.rbd.volume_external | quote }}\n        {{ end }}\n        {{ end }}\n        {{- if (contains \"cinder.backup.drivers.posix\" .Values.conf.cinder.DEFAULT.backup_driver) }}\n        - name: cinder-backup\n          persistentVolumeClaim:\n            claimName: cinder-backup\n        {{- end }}\n        {{- if eq ( split \"://\" .Values.conf.cinder.coordination.backend_url 
)._0 \"file\" }}\n        # NOTE (portdirect): this will need to be set to a shared mount amongst all cinder\n        # pods for the coordination backend to be fully functional.\n        - name: cinder-coordination\n          emptyDir: {}\n        {{- end }}\n        {{- if .Values.conf.enable_iscsi }}\n        - name: host-rootfs\n          hostPath:\n            path: /\n        - name: host-dev\n          hostPath:\n            path: /dev\n        - name: runcryptsetup\n          hostPath:\n            path: /run/cryptsetup\n        - name: runlock\n          hostPath:\n            path: /run/lock\n        - name: etciscsi\n          hostPath:\n            path: /etc/iscsi\n        - name: usrlocalsbin\n          emptyDir: {}\n        {{- end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_cinder_backup.volumes }}{{ toYaml $mounts_cinder_backup.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/deployment-scheduler.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_scheduler }}\n{{- $envAll := . }}\n\n{{- $mounts_cinder_scheduler := .Values.pod.mounts.cinder_scheduler.cinder_scheduler }}\n{{- $mounts_cinder_scheduler_init := .Values.pod.mounts.cinder_scheduler.init_container }}\n{{- $etcSources := .Values.pod.etcSources.cinder_scheduler }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"cinder-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"cinder-scheduler\" }}\n{{ tuple $envAll \"scheduler\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: cinder-scheduler\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"cinder\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.scheduler }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"cinder\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"cinder\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      
annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"cinder_scheduler\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"cinder-scheduler\" \"containerNames\" (list \"cinder-scheduler\" \"ceph-coordination-volume-perms\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"cinder_scheduler\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"cinder_scheduler\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder_scheduler\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"cinder\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ if $envAll.Values.pod.tolerations.cinder.enabled }}\n{{ tuple $envAll \"cinder\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.scheduler.node_selector_key }}: {{ .Values.labels.scheduler.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"scheduler\" $mounts_cinder_scheduler_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        {{- if eq ( split \"://\" .Values.conf.cinder.coordination.backend_url )._0 \"file\" }}\n        - name: ceph-coordination-volume-perms\n{{ tuple $envAll \"cinder_scheduler\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" 
\"cinder_scheduler\" \"container\" \"ceph_coordination_volume_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - \"cinder:\"\n            - {{ ( split \"://\" .Values.conf.cinder.coordination.backend_url )._1 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: cinder-coordination\n              mountPath: {{ ( split \"://\" .Values.conf.cinder.coordination.backend_url )._1 }}\n        {{ end }}\n      containers:\n        - name: cinder-scheduler\n{{ tuple $envAll \"cinder_scheduler\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.scheduler | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder_scheduler\" \"container\" \"cinder_scheduler\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/cinder-scheduler.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.cinder.oslo_concurrency.lock_path }}\n            - name: pod-var-cinder\n              mountPath: {{ .Values.conf.cinder.DEFAULT.state_path }}\n            - name: cinder-bin\n              mountPath: /tmp/cinder-scheduler.sh\n              subPath: cinder-scheduler.sh\n              readOnly: true\n            - name: cinder-etc\n              mountPath: /etc/cinder/cinder.conf\n              subPath: cinder.conf\n              readOnly: true\n            - name: cinder-etc-snippets\n              mountPath: /etc/cinder/cinder.conf.d/\n              readOnly: true\n            {{- if .Values.conf.cinder.DEFAULT.log_config_append }}\n            - name: cinder-etc\n              mountPath: {{ 
.Values.conf.cinder.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.cinder.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: cinder-etc\n              mountPath: /etc/cinder/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: cinder-etc\n              mountPath: /etc/cinder/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            {{- if eq ( split \"://\" .Values.conf.cinder.coordination.backend_url )._0 \"file\" }}\n            - name: cinder-coordination\n              mountPath: {{ ( split \"://\" .Values.conf.cinder.coordination.backend_url )._1 }}\n            {{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.volumev3.api.internal \"path\" \"/etc/cinder/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_cinder_scheduler.volumeMounts }}{{ toYaml $mounts_cinder_scheduler.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-var-cinder\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: cinder-bin\n          configMap:\n            name: cinder-bin\n            defaultMode: 0555\n        - name: cinder-etc\n          secret:\n            secretName: cinder-etc\n            defaultMode: 0444\n        - name: cinder-etc-snippets\n{{- if 
$etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        {{- if eq ( split \"://\" .Values.conf.cinder.coordination.backend_url )._0 \"file\" }}\n        # NOTE (portdirect): this will need to be set to a shared mount amongst all cinder\n        # pods for the coordination backend to be fully functional.\n        - name: cinder-coordination\n          emptyDir: {}\n        {{- end }}\n {{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.volumev3.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_cinder_scheduler.volumes }}{{ toYaml $mounts_cinder_scheduler.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/deployment-volume.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_volume }}\n{{- $envAll := . }}\n\n{{- $internal_ceph_backend := .Values.ceph_client.internal_ceph_backend }}\n\n{{- $mounts_cinder_volume := .Values.pod.mounts.cinder_volume.cinder_volume }}\n{{- $mounts_cinder_volume_init := .Values.pod.mounts.cinder_volume.init_container }}\n{{- $etcSources := .Values.pod.etcSources.cinder_volume }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"cinder-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"cinder-volume\" }}\n{{ tuple $envAll \"volume\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: cinder-volume\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"cinder\" \"volume\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.volume }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"cinder\" \"volume\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"cinder\" \"volume\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"cinder_volume\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"cinder-volume\" \"containerNames\" (list \"cinder-volume\" \"ceph-coordination-volume-perms\" \"init-cinder-conf\" \"init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"cinder_volume\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"cinder_volume\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder_volume\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"cinder\" \"volume\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ if $envAll.Values.pod.tolerations.cinder.enabled }}\n{{ tuple $envAll \"cinder\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.volume.node_selector_key }}: {{ .Values.labels.volume.node_selector_value }}\n{{- if .Values.pod.useHostNetwork.volume }}\n      hostNetwork: true\n      dnsPolicy: ClusterFirstWithHostNet\n{{- end }}\n{{- if .Values.conf.enable_iscsi }}\n      hostIPC: true\n{{- end }}\n      initContainers:\n{{ tuple $envAll \"volume\" $mounts_cinder_volume_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        {{- range $name := rest 
(splitList \",\" (include \"cinder.utils.ceph_backend_list\" $envAll)) }}\n          {{- $backend := index $envAll.Values.conf.backends $name }}\n            {{- if eq $internal_ceph_backend $name }}\n        - name: ceph-keyring-placement-{{ $name | lower }}\n{{ tuple $envAll \"cinder_volume\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder_volume\" \"container\" \"ceph_keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/ceph-keyring.sh\n          env:\n            - name: RBD_USER\n              value: {{ $backend.rbd_user | quote }}\n{{- if and $envAll.Values.ceph_client.enable_external_ceph_backend $envAll.Values.ceph_client.external_ceph.rbd_user }}\n            - name: EXTERNAL_RBD_USER\n              value: {{ $envAll.Values.ceph_client.external_ceph.rbd_user | quote }}\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: cinder-bin\n              mountPath: /tmp/ceph-keyring.sh\n              subPath: ceph-keyring.sh\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{- if and $envAll.Values.ceph_client.enable_external_ceph_backend $envAll.Values.ceph_client.external_ceph.rbd_user }}\n            - name: external-ceph-keyring\n              mountPath: /tmp/external-ceph-client-keyring\n              subPath: key\n              readOnly: true\n            {{- end }}\n            {{- end }}\n        {{- end }}\n        {{- if eq ( split \"://\" .Values.conf.cinder.coordination.backend_url )._0 \"file\" }}\n        - name: ceph-coordination-volume-perms\n{{ tuple $envAll \"cinder_volume\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ 
dict \"envAll\" $envAll \"application\" \"cinder_volume\" \"container\" \"ceph_coordination_volume_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - \"cinder:\"\n            - {{ ( split \"://\" .Values.conf.cinder.coordination.backend_url )._1 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: cinder-coordination\n              mountPath: {{ ( split \"://\" .Values.conf.cinder.coordination.backend_url )._1 }}\n        {{ end }}\n        - name: init-cinder-conf\n{{ dict \"envAll\" $envAll \"application\" \"cinder_volume\" \"container\" \"init_cinder_conf\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          image: {{ .Values.images.tags.ks_user }}\n          imagePullPolicy: {{ .Values.images.pull_policy }}\n          command:\n            - /tmp/retrieve-internal-tenant.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: cinder-bin\n              mountPath: /tmp/retrieve-internal-tenant.sh\n              subPath: retrieve-internal-tenant.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.volumev3.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 12 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" (or .Values.manifests.certificates .Values.tls.identity) }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n            - name: INTERNAL_PROJECT_NAME\n              value: {{ .Values.conf.cinder.DEFAULT.internal_project_name | quote }}\n            - name: INTERNAL_USER_NAME\n              value: {{ 
.Values.conf.cinder.DEFAULT.internal_user_name | quote }}\n{{- with $env := dict \"ksUserSecret\" (index .Values.secrets.identity \"cinder\" ) }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 12 }}\n{{- end }}\n      containers:\n        - name: cinder-volume\n{{ tuple $envAll \"cinder_volume\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.volume | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder_volume\" \"container\" \"cinder_volume\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/cinder-volume.sh\n          env:\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/cinder/certs/ca.crt\"\n{{- end }}\n          terminationMessagePath: /var/log/termination-log\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.cinder.oslo_concurrency.lock_path }}\n            - name: pod-var-cinder\n              mountPath: {{ .Values.conf.cinder.DEFAULT.state_path }}\n            - name: cinder-bin\n              mountPath: /tmp/cinder-volume.sh\n              subPath: cinder-volume.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: cinder-conversion\n              mountPath: /var/lib/cinder/conversion\n            - name: cinder-etc\n              mountPath: /etc/cinder/cinder.conf\n              subPath: cinder.conf\n              readOnly: true\n            - name: cinder-etc-snippets\n              mountPath: /etc/cinder/cinder.conf.d/\n              readOnly: true\n            {{- if .Values.conf.cinder.DEFAULT.log_config_append }}\n            - name: cinder-etc\n 
             mountPath: {{ .Values.conf.cinder.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.cinder.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: cinder-etc\n              mountPath: /etc/cinder/conf/backends.conf\n              subPath: backends.conf\n              readOnly: true\n            {{- if eq \"true\" (include \"cinder.utils.has_ceph_backend\" $envAll) }}\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{- if .Values.ceph_client.enable_external_ceph_backend }}\n            {{- if .Values.ceph_client.external_ceph.configmap }}\n            - name: external-ceph-etc\n              mountPath: /etc/ceph/external-ceph.conf\n              subPath: external-ceph.conf\n              readOnly: true\n            {{- else }}\n            - name: cinder-etc\n              mountPath: /etc/ceph/external-ceph.conf\n              subPath: external-ceph.conf\n              readOnly: true\n            {{- end }}\n            {{- if .Values.ceph_client.external_ceph.rbd_user }}\n            - name: external-ceph-keyring\n              mountPath: /tmp/external-ceph-client-keyring\n              subPath: key\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            {{- end }}\n            {{- if eq ( split \"://\" .Values.conf.cinder.coordination.backend_url )._0 \"file\" }}\n            - name: cinder-coordination\n              mountPath: {{ ( split \"://\" .Values.conf.cinder.coordination.backend_url )._1 }}\n            {{- end }}\n            - name: cinder-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # 
custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_cinder_sudoers\n              subPath: cinder_sudoers\n              readOnly: true\n            - name: cinder-etc\n              mountPath: /etc/sudoers.d/kolla_cinder_volume_sudoers\n              subPath: cinder_sudoers\n              readOnly: true\n            - name: cinder-etc\n              mountPath: /etc/cinder/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"volume\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/cinder/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: cinder-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            {{- if .Values.conf.enable_iscsi }}\n            - name: host-rootfs\n              mountPath: /mnt/host-rootfs\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: HostToContainer\n              {{- end }}\n            - name: host-dev\n              mountPath: /dev\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: HostToContainer\n              {{- end }}\n            - name: runcryptsetup\n              mountPath: /run/cryptsetup\n            - name: runlock\n              mountPath: /run/lock\n            - name: etciscsi\n              mountPath: /etc/iscsi\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: 
HostToContainer\n              {{- end }}\n            - name: usrlocalsbin\n              mountPath: /usr/local/sbin\n            - name: cinder-bin\n              mountPath: /usr/local/sbin/iscsiadm\n              subPath: iscsiadm\n            - name: cinder-bin\n              mountPath: /usr/local/sbin/multipath\n              subPath: multipath\n            - name: cinder-bin\n              mountPath: /usr/local/sbin/multipathd\n              subPath: multipathd\n            - name: etcmultipath\n              mountPath: /etc/multipath\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: Bidirectional\n              {{- end }}\n            - name: sys\n              mountPath: /sys\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: HostToContainer\n              {{- end }}\n            {{- end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.volumev3.api.internal \"path\" \"/etc/cinder/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_cinder_volume.volumeMounts }}{{ toYaml $mounts_cinder_volume.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-var-cinder\n          emptyDir: {}\n        - name: oslo-lock-path\n          
emptyDir: {}\n        - name: cinder-bin\n          configMap:\n            name: cinder-bin\n            defaultMode: 0555\n        - name: cinder-etc\n          secret:\n            secretName: cinder-etc\n            defaultMode: 0444\n        - name: cinder-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: pod-shared\n          emptyDir: {}\n        - name: cinder-conversion\n          emptyDir:\n            {{- if .Values.conf.enable_conversion_tmpfs }}\n            medium: Memory\n            sizeLimit: {{ .Values.conf.conversion_tmpfs_size }}\n            {{- else }}\n            {}\n            {{- end }}\n        {{- if eq \"true\" (include \"cinder.utils.has_ceph_backend\" $envAll) }}\n        - name: etcceph\n          emptyDir: {}\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n            defaultMode: 0444\n        {{- if and .Values.ceph_client.enable_external_ceph_backend .Values.ceph_client.external_ceph.configmap }}\n        - name: external-ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.external_ceph.configmap }}\n            defaultMode: 0444\n        {{- end }}\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.secrets.rbd.volume | quote }}\n        {{- if and .Values.ceph_client.enable_external_ceph_backend .Values.ceph_client.external_ceph.rbd_user }}\n        - name: external-ceph-keyring\n          secret:\n            secretName: {{ .Values.secrets.rbd.volume_external | quote }}\n        {{ end }}\n        {{ end }}\n        {{- if eq ( split \"://\" .Values.conf.cinder.coordination.backend_url )._0 \"file\" }}\n        # NOTE (portdirect): this will need to be set to a shared mount amongst all cinder\n        # pods for the coordination backend to be fully functional.\n        - name: 
cinder-coordination\n          emptyDir: {}\n        {{- end }}\n        {{- if .Values.conf.enable_iscsi }}\n        - name: host-rootfs\n          hostPath:\n            path: /\n        - name: host-dev\n          hostPath:\n            path: /dev\n        - name: runcryptsetup\n          hostPath:\n            path: /run/cryptsetup\n        - name: runlock\n          hostPath:\n            path: /run/lock\n        - name: etciscsi\n          hostPath:\n            path: /etc/iscsi\n        - name: usrlocalsbin\n          emptyDir: {}\n        - name: etcmultipath\n          hostPath:\n            path: /etc/multipath\n        - name: sys\n          hostPath:\n            path: /sys\n        {{- end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.volumev3.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_cinder_volume.volumes }}{{ toYaml $mounts_cinder_volume.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "cinder/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $envAll := . -}}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendServiceType\" \"volumev3\" \"backendPort\" \"c-api\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.volumev3.api.internal -}}\n{{- if and .Values.manifests.certificates $secretName -}}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.volume.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/job-backup-storage-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_backup_storage_init }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"cinder-backup-storage-init\" }}\n{{ tuple $envAll \"backup_storage_init\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: cinder-backup-storage-init\n  labels:\n{{ tuple $envAll \"cinder\" \"backup-storage-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": post-install,post-upgrade\n    \"helm.sh/hook-delete-policy\": before-hook-creation\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"cinder\" \"storage-init\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"cinder-backup-storage-init\" \"containerNames\" (list \"cinder-backup-storage-init\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"storage_init\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.cinder.enabled }}\n{{ tuple $envAll \"cinder\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"backup_storage_init\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        {{- if (contains \"cinder.backup.drivers.ceph\" .Values.conf.cinder.DEFAULT.backup_driver) }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"cinder_backup_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"storage_init\" \"container\" \"ceph_keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/ceph-admin-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            {{- if not .Values.backup.external_ceph_rbd.enabled }}\n            - name: cinder-bin\n              mountPath: /tmp/ceph-admin-keyring.sh\n              subPath: ceph-admin-keyring.sh\n              readOnly: true\n            {{- else }}\n            - name: cinder-bin\n              mountPath: /tmp/ceph-admin-keyring.sh\n   
           subPath: external-ceph-rbd-admin-keyring.sh\n              readOnly: true\n            {{ end }}\n            {{- if empty .Values.conf.ceph.admin_keyring }}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{ end }}\n        {{ end }}\n      containers:\n        - name: cinder-backup-storage-init\n{{ tuple $envAll \"cinder_backup_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.backup_storage_init | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"storage_init\" \"container\" \"cinder_backup_storage_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: STORAGE_BACKEND\n              value: {{ .Values.conf.cinder.DEFAULT.backup_driver | quote }}\n            {{- if (contains \"cinder.backup.drivers.ceph\" .Values.conf.cinder.DEFAULT.backup_driver) }}\n            - name: RBD_POOL_NAME\n              value: {{ .Values.conf.cinder.DEFAULT.backup_ceph_pool | quote }}\n            - name: RBD_POOL_APP_NAME\n              value: {{ .Values.conf.ceph.pools.backup.app_name | quote }}\n            - name: RBD_POOL_USER\n              value: {{ .Values.conf.cinder.DEFAULT.backup_ceph_user | quote }}\n            - name: RBD_POOL_CRUSH_RULE\n              value: {{ .Values.conf.ceph.pools.backup.crush_rule | quote }}\n            - name: RBD_POOL_REPLICATION\n              value: {{ .Values.conf.ceph.pools.backup.replication | quote }}\n            - name: RBD_POOL_CHUNK_SIZE\n              value: {{ .Values.conf.ceph.pools.backup.chunk_size | quote }}\n            - name: RBD_POOL_SECRET\n              value: {{ 
.Values.secrets.rbd.backup | quote }}\n            {{ end }}\n          command:\n            - /tmp/backup-storage-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: cinder-bin\n              mountPath: /tmp/backup-storage-init.sh\n              subPath: backup-storage-init.sh\n              readOnly: true\n            {{- if (contains \"cinder.backup.drivers.ceph\" .Values.conf.cinder.DEFAULT.backup_driver) }}\n            - name: etcceph\n              mountPath: /etc/ceph\n            {{- if not .Values.backup.external_ceph_rbd.enabled }}\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            {{- else }}\n            - name: cinder-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: external-backup-ceph.conf\n              readOnly: true\n            {{- end }}\n            {{- if empty .Values.conf.ceph.admin_keyring }}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{- end }}\n            {{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: cinder-bin\n          configMap:\n            name: cinder-bin\n            defaultMode: 0555\n        {{- if (contains \"cinder.backup.drivers.ceph\" .Values.conf.cinder.DEFAULT.backup_driver) }}\n        - name: etcceph\n          emptyDir: {}\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n            defaultMode: 0444\n        {{- if .Values.backup.external_ceph_rbd.enabled }}\n        - name: cinder-etc\n          secret:\n            secretName: cinder-etc\n            defaultMode: 0444\n        {{- end }}\n        {{- if empty .Values.conf.ceph.admin_keyring }}\n        - name: ceph-keyring\n          secret:\n            secretName: {{ 
.Values.ceph_client.user_secret_name }}\n        {{ end }}\n        {{ end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.bootstrap\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"5\"\n{{- end }}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"cinder\" \"keystoneUser\" .Values.bootstrap.ks_user \"logConfigFile\" .Values.conf.cinder.DEFAULT.log_config_append \"jobAnnotations\" (include \"metadata.annotations.job.bootstrap\" . | fromYaml) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $bootstrapJob \"tlsSecret\" .Values.secrets.tls.volumev3.api.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.cinder.enabled -}}\n{{- $_ := set $bootstrapJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/job-clean.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_clean }}\n{{- $envAll := . }}\n{{ if or (contains \"cinder.backup.drivers.ceph\" .Values.conf.cinder.DEFAULT.backup_driver) (eq \"true\" (include \"cinder.utils.has_ceph_backend\" $envAll)) }}\n\n{{- $serviceAccountName := print \"cinder-clean\" }}\n{{ tuple $envAll \"clean\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - delete\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ print \"cinder-clean\" }}\n  labels:\n{{ tuple $envAll \"cinder\" \"clean\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": pre-delete\n    \"helm.sh/hook-delete-policy\": hook-succeeded\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n    {{- tuple \"cinder-clean\" $envAll | include 
\"helm-toolkit.snippets.custom_job_annotations\" | nindent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"cinder\" \"clean\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"clean\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.cinder.enabled }}\n{{ tuple $envAll \"cinder\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"clean\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        {{- if eq \"true\" (include \"cinder.utils.has_ceph_backend\" $envAll) }}\n        - name: cinder-volume-rbd-secret-clean\n{{ tuple $envAll \"cinder_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.clean | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"clean\" \"container\" \"cinder_volume_rbd_secret_clean\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: RBD_POOL_SECRET\n              value: {{ .Values.secrets.rbd.volume | quote }}\n          command:\n            - /tmp/clean-secrets.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: cinder-bin\n              mountPath: /tmp/clean-secrets.sh\n              subPath: clean-secrets.sh\n              
readOnly: true\n        {{ end }}\n        {{- if (contains \"cinder.backup.drivers.ceph\" .Values.conf.cinder.DEFAULT.backup_driver) }}\n        - name: cinder-volume-backup-secret-clean\n{{ tuple $envAll \"cinder_backup_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.clean | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: RBD_POOL_SECRET\n              value: {{ .Values.secrets.rbd.backup | quote }}\n          command:\n            - /tmp/clean-secrets.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: cinder-bin\n              mountPath: /tmp/clean-secrets.sh\n              subPath: clean-secrets.sh\n              readOnly: true\n        {{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: cinder-bin\n          configMap:\n            name: cinder-bin\n            defaultMode: 0555\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/job-create-internal-tenant.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_create_internal_tenant }}\n{{- $envAll := . }}\n\n{{- $serviceName := \"cinder\" }}\n{{- $nodeSelector := index . \"nodeSelector\" | default ( dict $envAll.Values.labels.job.node_selector_key $envAll.Values.labels.job.node_selector_value ) -}}\n{{- $configMapBin := index . \"configMapBin\" | default (printf \"%s-%s\" $serviceName \"bin\" ) -}}\n{{- $serviceUser := index . \"serviceUser\" | default $serviceName -}}\n{{- $serviceUserPretty := $serviceUser | replace \"_\" \"-\" -}}\n\n{{- $serviceAccountName := printf \"%s-%s\" $serviceUserPretty \"create-internal-tenant\" }}\n{{ tuple $envAll \"create_internal_tenant\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceUserPretty \"create-internal-tenant\" | quote }}\n  annotations:\n    \"helm.sh/hook\": post-install,post-upgrade\n    \"helm.sh/hook-delete-policy\": before-hook-creation\n    {{- tuple \"cinder-create-internal-tenant\" $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | nindent 4 }}\n  labels:\n{{ tuple $envAll \"cinder\" \"create-internal-tenant\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll $serviceName \"create-internal-tenant\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" $serviceAccountName \"containerNames\" (list \"create-internal-tenant\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"create_internal_tenant\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName | quote }}\n      restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.cinder.enabled }}\n{{ tuple $envAll \"cinder\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n      initContainers:\n{{ tuple $envAll \"create_internal_tenant\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: create-internal-tenant\n          image: {{ $envAll.Values.images.tags.ks_user }}\n          imagePullPolicy: {{ $envAll.Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"create_internal_tenant\" \"container\" \"create_internal_tenant\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/create-internal-tenant.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: create-internal-tenant-sh\n              mountPath: /tmp/create-internal-tenant.sh\n              subPath: create-internal-tenant.sh\n              readOnly: true\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.volumev3.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 
12 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.admin \"useCA\" (or .Values.manifests.certificates .Values.tls.identity) }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n            - name: SERVICE_OS_SERVICE_NAME\n              value: {{ $serviceName | quote }}\n            - name: INTERNAL_PROJECT_NAME\n              value: {{ .Values.conf.cinder.DEFAULT.internal_project_name | quote }}\n            - name: INTERNAL_USER_NAME\n              value: {{ .Values.conf.cinder.DEFAULT.internal_user_name | quote }}\n\n{{- with $env := dict \"ksUserSecret\" (index $envAll.Values.secrets.identity $serviceUser ) }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 12 }}\n{{- end }}\n            - name: SERVICE_OS_ROLES\n            {{- $serviceOsRoles := index $envAll.Values.endpoints.identity.auth $serviceUser \"role\" }}\n            {{- if kindIs \"slice\" $serviceOsRoles }}\n              value: {{ include \"helm-toolkit.utils.joinListWithComma\" $serviceOsRoles | quote }}\n            {{- else }}\n              value: {{ $serviceOsRoles | quote }}\n            {{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: create-internal-tenant-sh\n          configMap:\n            name: {{ $configMapBin | quote }}\n            defaultMode: 0555\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.volumev3.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end -}}\n"
  },
  {
    "path": "cinder/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"cinder\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbDropJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.cinder.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"cinder\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.cinder.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"cinder\" \"podVolMounts\" .Values.pod.mounts.cinder_db_sync.cinder_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.cinder_db_sync.cinder_db_sync.volumes -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbSyncJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.cinder.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"cinder\" -}}\n{{- $_ := set $imageRepoSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.cinder.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"cinder\" \"serviceTypes\" ( tuple \"volumev3\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.volumev3.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.cinder.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $serviceTypes := list }}\n{{- range $_, $v := keys .Values.endpoints | sortAlpha }}\n{{- if $v | hasPrefix \"volume\" }}\n{{- $serviceTypes = append $serviceTypes $v }}\n{{- end }}\n{{- end }}\n{{- $volTypes := list \"volumev3\" -}}\n{{- if .Values.conf.cinder.DEFAULT.enable_v1_api }}\n{{- $volTypes = append $volTypes \"volume\" }}\n{{- end }}\n{{- if .Values.conf.cinder.DEFAULT.enable_v2_api }}\n{{- $volTypes = append $volTypes \"volumev2\" }}\n{{- end }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"cinder\" \"serviceTypes\" ( $volTypes ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.volumev3.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.cinder.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $serviceUsers := (tuple \"cinder\" \"nova\") -}}\n{{- if (contains \"cinder.backup.drivers.swift\" .Values.conf.cinder.DEFAULT.backup_driver) }}\n{{- $serviceUsers = append $serviceUsers \"swift\" -}}\n{{- end }}\n{{- if .Values.conf.cinder.service_user.send_service_user_token -}}\n{{- $serviceUsers = append $serviceUsers \"service\" -}}\n{{- end }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"cinder\" \"serviceUsers\" $serviceUsers -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.volumev3.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.cinder.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"cinder\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $rmqUserJob \"tlsSecret\" .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $rmqUserJob \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.cinder.enabled -}}\n{{- $_ := set $rmqUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/job-storage-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_storage_init }}\n{{- $envAll := . }}\n\n{{- $internal_ceph_backend := .Values.ceph_client.internal_ceph_backend }}\n\n{{- $serviceAccountName := \"cinder-storage-init\" }}\n{{ tuple $envAll \"storage_init\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: cinder-storage-init\n  labels:\n{{ tuple $envAll \"cinder\" \"storage-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": post-install,post-upgrade\n    \"helm.sh/hook-delete-policy\": before-hook-creation\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"cinder\" \"storage-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n  
    serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"cinder\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.cinder.enabled }}\n{{ tuple $envAll \"cinder\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"storage_init\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        {{- if eq \"true\" (include \"cinder.utils.has_ceph_backend\" $envAll) }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"cinder_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          securityContext:\n            runAsUser: 0\n          command:\n            - /tmp/ceph-admin-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: cinder-bin\n              mountPath: /tmp/ceph-admin-keyring.sh\n              subPath: ceph-admin-keyring.sh\n              readOnly: true\n            {{- if empty .Values.conf.ceph.admin_keyring }}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{ end }}\n        {{ end }}\n      containers:\n        {{- range $name, $backend := .Values.conf.backends }}\n          {{- if (eq \"true\" ( dict \"backend\" $backend | include \"cinder.utils.is_ceph_backend\" )) }}\n            {{- if eq $internal_ceph_backend $name }}\n        - name: cinder-storage-init-{{ $name | lower }}\n{{ tuple $envAll \"cinder_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll 
$envAll.Values.pod.resources.jobs.storage_init | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: STORAGE_BACKEND\n              value: {{ $backend.volume_driver | quote }}\n            - name: RBD_POOL_NAME\n              value: {{ $backend.rbd_pool | quote }}\n            - name: RBD_POOL_APP_NAME\n              value: {{ (index $envAll.Values.conf.ceph.pools $backend.rbd_pool).app_name | quote }}\n            - name: RBD_POOL_USER\n              value: {{ $backend.rbd_user | quote }}\n            - name: RBD_POOL_CRUSH_RULE\n              value: {{ (index $envAll.Values.conf.ceph.pools $backend.rbd_pool).crush_rule | quote }}\n            - name: RBD_POOL_REPLICATION\n              value: {{ (index $envAll.Values.conf.ceph.pools $backend.rbd_pool).replication | quote }}\n            - name: RBD_POOL_CHUNK_SIZE\n              value: {{ (index $envAll.Values.conf.ceph.pools $backend.rbd_pool).chunk_size | quote }}\n            - name: RBD_POOL_SECRET\n              value: {{ $envAll.Values.secrets.rbd.volume | quote }}\n          command:\n            - /tmp/storage-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: cinder-bin\n              mountPath: /tmp/storage-init.sh\n              subPath: storage-init.sh\n              readOnly: true\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            {{- if empty $envAll.Values.conf.ceph.admin_keyring }}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{- end }}\n            {{- end }}\n        {{- end }}\n   
   {{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: cinder-bin\n          configMap:\n            name: cinder-bin\n            defaultMode: 0555\n        {{- if eq \"true\" (include \"cinder.utils.has_ceph_backend\" $envAll) }}\n        - name: etcceph\n          emptyDir: {}\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n            defaultMode: 0444\n        {{- if empty .Values.conf.ceph.admin_keyring }}\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.ceph_client.user_secret_name }}\n        {{- end }}\n        {{- end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/network_policy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"cinder\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "cinder/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: cinder-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"cinder\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/pod-rally-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pod_rally_test }}\n{{- $envAll := . }}\n\n{{- $mounts_tests := .Values.pod.mounts.cinder_tests.cinder_tests }}\n{{- $mounts_tests_init := .Values.pod.mounts.cinder_tests.init_container }}\n\n{{- $serviceAccountName := print $envAll.Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ print $envAll.Release.Name \"-test\" }}\n  labels:\n{{ tuple $envAll \"cinder\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"cinder-test\" \"containerNames\" (list \"init\" \"cinder-test\" \"cinder-test-ks-user\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n  restartPolicy: Never\n{{ if $envAll.Values.pod.tolerations.cinder.enabled }}\n{{ tuple $envAll \"cinder\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n{{ tuple \"cinder_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 2 }}\n{{ tuple \"cinder_tests\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n    - name: cinder-test-ks-user\n{{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      command:\n        - /tmp/ks-user.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: cinder-bin\n          mountPath: /tmp/ks-user.sh\n          subPath: ks-user.sh\n          readOnly: true\n{{ dict \"enabled\" .Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.volumev3.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 8 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_SERVICE_NAME\n          value: \"test\"\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_ROLE\n          value: {{ .Values.endpoints.identity.auth.test.role | quote }}\n  containers:\n    - name: cinder-test\n{{ tuple $envAll \"test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n{{- 
with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: RALLY_ENV_NAME\n          value: {{.Release.Name}}\n      command:\n        - /tmp/rally-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: cinder-etc\n          mountPath: /etc/rally/rally_tests.yaml\n          subPath: rally_tests.yaml\n          readOnly: true\n        - name: cinder-bin\n          mountPath: /tmp/rally-test.sh\n          subPath: rally-test.sh\n          readOnly: true\n        - name: rally-db\n          mountPath: /var/lib/rally\n{{ dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.volumev3.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 8 }}\n{{ if $mounts_tests.volumeMounts }}{{ toYaml $mounts_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: cinder-etc\n      secret:\n        secretName: cinder-etc\n        defaultMode: 0444\n    - name: cinder-bin\n      configMap:\n        name: cinder-bin\n        defaultMode: 0555\n    - name: rally-db\n      emptyDir: {}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.volumev3.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{ if $mounts_tests.volumes }}{{ toYaml $mounts_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/pvc-backup.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pvc_backup }}\n{{- $envAll := . }}\n{{- if (contains \"cinder.backup.drivers.posix\" .Values.conf.cinder.DEFAULT.backup_driver) }}\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: cinder-backup\nspec:\n  accessModes: [ \"ReadWriteOnce\" ]\n  resources:\n    requests:\n      storage: {{ .Values.backup.posix.volume.size }}\n  storageClassName: {{ .Values.backup.posix.volume.class_name }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"cinder\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/secret-external-ceph-keyring.yaml",
    "content": "{{/*\nThis manifest results a secret being created which has the keyring information\nneeded for cinder rbd user of external managed ceph backend\n*/}}\n\n{{- if and .Values.ceph_client.enable_external_ceph_backend .Values.ceph_client.external_ceph.rbd_user }}\n\n{{- $envAll := . }}\n{{- $userClass := \"volume_external\" }}\n{{- $secretName := index $envAll.Values.secrets.rbd $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"rbd\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  key: {{ $envAll.Values.ceph_client.external_ceph.rbd_user_keyring | b64enc }}\n...\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"volumev3\" ) }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $userClass, $val := $envAll.Values.endpoints.identity.auth }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/secret-ks-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $envAll := . -}}\n{{/* the endpoints.identity.auth sections with the oslo conf sections they get rendered to */}}\n{{- $ksUsers := dict\n  \"cinder\" \"keystone_authtoken\"\n  \"nova\" \"nova\"\n-}}\n{{- if .Values.conf.cinder.service_user.send_service_user_token }}\n{{- $_ := set $ksUsers \"service\" \"service_user\" -}}\n{{- end }}\n{{ dict\n  \"envAll\" $envAll\n  \"serviceName\" \"cinder\"\n  \"serviceUserSections\" $ksUsers\n  | include \"helm-toolkit.manifests.secret_ks_etc\"\n}}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- $rabbitmqProtocol := \"http\" }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- $rabbitmqProtocol = \"https\" }}\n{{- end }}\n{{- range $key1, $userClass := tuple \"admin\" \"cinder\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass $rabbitmqProtocol $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"volumev3\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: c-api\n      port: {{ tuple \"volumev3\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"cinder\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"volumev3\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "cinder/templates/utils/_ceph_backend_list.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- /*\n    Return string with all ceph backends separated by comma. The list\n    is either empty or it starts with a comma. Assuming \"a\", \"b\" and\n    \"c\" are ceph backends then ceph_backend_list returns \",a,b,c\".\n    This means the first element in the returned list representation\n    can always be skipped.\n\n    Usage:\n        range $name := rest (splitList \",\" (include \"cinder.utils.ceph_backend_list\" $))\n*/ -}}\n{{- define \"cinder.utils.ceph_backend_list\" -}}\n  {{- range $name, $backend := .Values.conf.backends -}}\n    {{- if kindIs \"map\" $backend }}\n      {{- if (eq $backend.volume_driver \"cinder.volume.drivers.rbd.RBDDriver\") -}}\n        {{- \",\" -}}\n        {{- $name -}}\n      {{- end -}}\n    {{- end -}}\n  {{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "cinder/templates/utils/_has_ceph_backend.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"cinder.utils.has_ceph_backend\" -}}\n  {{- $has_ceph := false -}}\n  {{- range $_, $backend := .Values.conf.backends -}}\n    {{- if kindIs \"map\" $backend -}}\n      {{- $has_ceph = or $has_ceph (eq $backend.volume_driver \"cinder.volume.drivers.rbd.RBDDriver\") -}}\n    {{- end -}}\n  {{- end -}}\n  {{- $has_ceph -}}\n{{- end -}}\n"
  },
  {
    "path": "cinder/templates/utils/_is_ceph_backend.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"cinder.utils.is_ceph_backend\" -}}\n{{- $backend := index . \"backend\" -}}\n{{- printf \"%v\" (and ( kindIs \"map\" $backend ) ( eq $backend.volume_driver \"cinder.volume.drivers.rbd.RBDDriver\" )) -}}\n{{- end -}}\n"
  },
  {
    "path": "cinder/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for cinder.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nstorage: ceph\n\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  backup:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  scheduler:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  volume:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nrelease_group: null\n\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    cinder_db_sync: quay.io/airshipit/cinder:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    cinder_api: quay.io/airshipit/cinder:2025.1-ubuntu_noble\n    bootstrap: 
quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    cinder_scheduler: quay.io/airshipit/cinder:2025.1-ubuntu_noble\n    cinder_volume: quay.io/airshipit/cinder:2025.1-ubuntu_noble\n    cinder_volume_usage_audit: quay.io/airshipit/cinder:2025.1-ubuntu_noble\n    cinder_db_purge: quay.io/airshipit/cinder:2025.1-ubuntu_noble\n    cinder_storage_init: quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\n    cinder_backup: quay.io/airshipit/cinder:2025.1-ubuntu_noble\n    cinder_backup_storage_init: quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\njobs:\n  volume_usage_audit:\n    cron: \"5 * * * *\"\n    starting_deadline: 600\n    history:\n      success: 3\n      failed: 1\n  db_purge:\n    cron: \"0 0 * * *\"\n    starting_deadline: 600\n    history:\n      success: 3\n      failed: 1\n\npod:\n  probes:\n    api:\n      cinder-api:\n        liveness:\n          enabled: True\n          params:\n            initialDelaySeconds: 5\n            periodSeconds: 10\n            timeoutSeconds: 1\n            failureThreshold: 3\n            successThreshold: 1\n        readiness:\n          enabled: True\n          params:\n            initialDelaySeconds: 30\n  security_context:\n    db_purge:\n      pod:\n        runAsUser: 42424\n      container:\n        cinder_db_purge:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    volume_usage_audit:\n      pod:\n        runAsUser: 42424\n      container:\n        cinder_volume_usage_audit:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    cinder_api:\n      pod:\n        runAsUser: 42424\n      container:\n        ceph_coordination_volume_perms:\n          runAsUser: 0\n          
readOnlyRootFilesystem: true\n        cinder_api:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    cinder_backup:\n      pod:\n        runAsUser: 42424\n      container:\n        ceph_backup_keyring_placement:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        ceph_keyring_placement:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        ceph_backup_volume_perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        ceph_coordination_volume_perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        cinder_backup:\n          capabilities:\n            add:\n              - SYS_ADMIN\n          readOnlyRootFilesystem: true\n          runAsUser: 0\n    cinder_scheduler:\n      pod:\n        runAsUser: 42424\n      container:\n        ceph_coordination_volume_perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        cinder_scheduler:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    cinder_volume:\n      pod:\n        runAsUser: 42424\n      container:\n        ceph_keyring_placement:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        ceph_coordination_volume_perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        init_cinder_conf:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        cinder_volume:\n          privileged: true\n          readOnlyRootFilesystem: true\n    storage_init:\n      pod:\n        runAsUser: 42424\n      container:\n        ceph_keyring_placement:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        cinder_backup_storage_init:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    clean:\n      pod:\n        runAsUser: 42424\n      container:\n        cinder_volume_rbd_secret_clean:\n          readOnlyRootFilesystem: true\n          
allowPrivilegeEscalation: false\n    create_internal_tenant:\n      pod:\n        runAsUser: 42424\n      container:\n        create_internal_tenant:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    cinder:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  useHostNetwork:\n    volume: false\n    backup: false\n  mounts:\n    cinder_api:\n      init_container: null\n      cinder_api:\n        volumeMounts:\n        volumes:\n    cinder_scheduler:\n      init_container: null\n      cinder_scheduler:\n        volumeMounts:\n        volumes:\n    cinder_volume:\n      init_container: null\n      cinder_volume:\n        volumeMounts:\n        volumes:\n    cinder_volume_usage_audit:\n      init_container: null\n      cinder_volume_usage_audit:\n        volumeMounts:\n        volumes:\n    cinder_db_purge:\n      init_container: null\n      cinder_db_purge:\n        volumeMounts:\n        volumes:\n    cinder_backup:\n      init_container: null\n      cinder_backup:\n        volumeMounts:\n        volumes:\n    cinder_tests:\n      init_container: null\n      cinder_tests:\n        volumeMounts:\n        volumes:\n    cinder_db_sync:\n      cinder_db_sync:\n        volumeMounts:\n        volumes:\n  # -- This allows users to add Kubernetes Projected Volumes to be mounted at /etc/cinder/cinder.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/daemonset/cronjob\n  ## https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    cinder_api: []\n    
cinder_scheduler: []\n    cinder_volume: []\n    cinder_volume_usage_audit: []\n    cinder_db_purge: []\n    cinder_backup: []\n    cinder_db_sync: []\n  replicas:\n    api: 1\n    volume: 1\n    scheduler: 1\n    backup: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    scheduler:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    volume:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      volume_usage_audit:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: 
\"1024Mi\"\n          cpu: \"2000m\"\n      db_purge:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      clean:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      backup_storage_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      storage_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nbootstrap:\n  enabled: true\n  ks_user: admin\n  bootstrap_conf_backends: true\n  volume_types:\n    name:\n      group:\n      volume_backend_name:\n      # access_type: \"private\"\n      # If you set up access_type to private, only the creator\n      # will get an access to the volume type. 
You can extend\n      # the access to your volume type by providing a list of\n      # domain names and projects as shown below\n      # grant_access:\n      #   <domain name 1>:\n      #     - <project name 1>\n      #     - <project name 2>\n      #     <...>\n      #   <domain name 2>:\n      #     - <project name 1>\n      #     <...>\n  # Volume QoS if any. By default, None QoS is created.\n  # Below values with a number at the end need to be replaced\n  # with real names.\n  # volume_qos:\n  #   qos_name_1:\n  #     consumer: front-end\n  #     properties:\n  #       key_1: value_1\n  #       key_2: value_2\n  #     associates:\n  #       - volume_type_1\n  #       - volume_type_2\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30877\n\nceph_client:\n  # enable this when there is a need to create second ceph backend pointing\n  # to external ceph cluster\n  enable_external_ceph_backend: false\n  # change this in case of first ceph backend name pointing to internal ceph cluster\n  # is different\n  internal_ceph_backend: rbd1\n  configmap: ceph-etc\n  user_secret_name: pvc-ceph-client-key\n  external_ceph:\n    # Only when enable_external_ceph_backend is true and rbd_user is NOT null\n    # secret for external ceph keyring will be created.\n    rbd_user: null\n    rbd_user_keyring: null\n    configmap: null\n    conf:\n      global: null\n      osd: null\nconf:\n  paste:\n    composite:osapi_volume:\n      use: call:cinder.api:root_app_factory\n      /: apiversions\n      /healthcheck: healthcheck\n      /v3: openstack_volume_api_v3\n    composite:openstack_volume_api_v3:\n      use: call:cinder.api.middleware.auth:pipeline_factory\n      noauth: cors 
http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3\n      keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv3\n      keystone_nolimit: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv3\n    filter:request_id:\n      paste.filter_factory: oslo_middleware.request_id:RequestId.factory\n    filter:http_proxy_to_wsgi:\n      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory\n    filter:cors:\n      paste.filter_factory: oslo_middleware.cors:filter_factory\n      oslo_config_project: cinder\n    filter:faultwrap:\n      paste.filter_factory: cinder.api.middleware.fault:FaultWrapper.factory\n    filter:osprofiler:\n      paste.filter_factory: osprofiler.web:WsgiMiddleware.factory\n    filter:noauth:\n      paste.filter_factory: cinder.api.middleware.auth:NoAuthMiddleware.factory\n    filter:sizelimit:\n      paste.filter_factory: oslo_middleware.sizelimit:RequestBodySizeLimiter.factory\n    app:apiv3:\n      paste.app_factory: cinder.api.v3.router:APIRouter.factory\n    app:healthcheck:\n      paste.app_factory: oslo_middleware:Healthcheck.app_factory\n      backends: disable_by_file\n      disable_by_file_path: /etc/cinder/healthcheck_disable\n    pipeline:apiversions:\n      pipeline: cors http_proxy_to_wsgi faultwrap osvolumeversionapp\n    app:osvolumeversionapp:\n      paste.app_factory: cinder.api.versions:Versions.factory\n    filter:keystonecontext:\n      paste.filter_factory: cinder.api.middleware.auth:CinderKeystoneContext.factory\n    filter:authtoken:\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n    filter:audit:\n      paste.filter_factory: keystonemiddleware.audit:filter_factory\n      audit_map_file: /etc/cinder/api_audit_map.conf\n  policy: {}\n  api_audit_map:\n    DEFAULT:\n      target_endpoint_type: None\n    custom_actions:\n      associate: 
update/associate\n      disassociate: update/disassociate_all\n      disassociate_all: update/disassociate_all\n      associations: read/list/associations\n    path_keywords:\n      defaults: None\n      detail: None\n      limits: None\n      os-quota-specs: project\n      qos-specs: qos-spec\n      snapshots: snapshot\n      types: type\n      volumes: volume\n    service_endpoints:\n      volumev3: service/storage/block\n  cinder_sudoers: |\n    # This sudoers file supports rootwrap for both Kolla and LOCI Images.\n    Defaults !requiretty\n    Defaults secure_path=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin\"\n    cinder ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *, /var/lib/openstack/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *\n  rootwrap: |\n    # Configuration for cinder-rootwrap\n    # This file should be owned by (and only-writeable by) the root user\n\n    [DEFAULT]\n    # List of directories to load filter definitions from (separated by ',').\n    # These directories MUST all be only writeable by root !\n    filters_path=/etc/cinder/rootwrap.d\n\n    # List of directories to search executables in, in case filters do not\n    # explicitely specify a full path (separated by ',')\n    # If not specified, defaults to system PATH environment variable.\n    # These directories MUST all be only writeable by root !\n    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin\n\n    # Enable logging to syslog\n    # Default value is False\n    use_syslog=False\n\n    # Which syslog facility to use.\n    # Valid values include auth, authpriv, syslog, local0, local1...\n    # Default value is 'syslog'\n    syslog_log_facility=syslog\n\n    # Which messages to log.\n    # INFO means log all usage\n    # ERROR means only log unsuccessful attempts\n    syslog_log_level=ERROR\n  
rootwrap_filters:\n    volume:\n      pods:\n        - volume\n      content: |\n        # cinder-rootwrap command filters for volume nodes\n        # This file should be owned by (and only-writeable by) the root user\n\n        [Filters]\n        # cinder/volume/iscsi.py: iscsi_helper '--op' ...\n        ietadm: CommandFilter, ietadm, root\n        tgtadm: CommandFilter, tgtadm, root\n        iscsictl: CommandFilter, iscsictl, root\n        tgt-admin: CommandFilter, tgt-admin, root\n        cinder-rtstool: CommandFilter, cinder-rtstool, root\n        scstadmin: CommandFilter, scstadmin, root\n\n        # LVM related show commands\n        pvs: EnvFilter, env, root, LC_ALL=C, pvs\n        vgs: EnvFilter, env, root, LC_ALL=C, vgs\n        lvs: EnvFilter, env, root, LC_ALL=C, lvs\n        lvdisplay: EnvFilter, env, root, LC_ALL=C, lvdisplay\n\n        # -LVM related show commands with suppress fd warnings\n        pvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs\n        vgs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs\n        lvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs\n        lvdisplay_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay\n\n\n        # -LVM related show commands conf var\n        pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, pvs\n        vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, vgs\n        lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvs\n        lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvdisplay\n\n        # -LVM conf var with suppress fd_warnings\n        pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs\n        vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs\n        lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs\n        
lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay\n\n        # os-brick library commands\n        # os_brick.privileged.run_as_root oslo.privsep context\n        # This line ties the superuser privs with the config files, context name,\n        # and (implicitly) the actual python code invoked.\n        privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\\.\\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*\n        # The following and any cinder/brick/* entries should all be obsoleted\n        # by privsep, and may be removed once the os-brick version requirement\n        # is updated appropriately.\n        scsi_id: CommandFilter, /lib/udev/scsi_id, root\n        drbdadm: CommandFilter, drbdadm, root\n\n        # cinder/brick/local_dev/lvm.py: 'vgcreate', vg_name, pv_list\n        vgcreate: CommandFilter, vgcreate, root\n\n        # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', sizestr, '-n', volume_name,..\n        # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', ...\n        lvcreate: EnvFilter, env, root, LC_ALL=C, lvcreate\n        lvcreate_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvcreate\n        lvcreate_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvcreate\n        lvcreate_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, LC_ALL=C, lvcreate\n\n        # cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,...\n        dd: CommandFilter, dd, root\n\n        # cinder/volume/driver.py: 'lvremove', '-f', %s/%s % ...\n        lvremove: CommandFilter, lvremove, root\n\n        # cinder/volume/driver.py: 'lvrename', '%(vg)s', '%(orig)s' '(new)s'...\n        lvrename: CommandFilter, lvrename, root\n\n        # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(lv_name)s' ...\n        # cinder/brick/local_dev/lvm.py: 'lvextend', 
'-L' '%(new_size)s', '%(thin_pool)s' ...\n        lvextend: EnvFilter, env, root, LC_ALL=C, lvextend\n        lvextend_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvextend\n        lvextend_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend\n        lvextend_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend\n\n        # cinder/brick/local_dev/lvm.py: 'lvchange -a y -K <lv>'\n        lvchange: CommandFilter, lvchange, root\n\n        # cinder/brick/local_dev/lvm.py: 'lvconvert', '--merge', snapshot_name\n        lvconvert: CommandFilter, lvconvert, root\n\n        # cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',...\n        # cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ...\n        iscsiadm: CommandFilter, iscsiadm, root\n\n        # cinder/volume/utils.py: utils.temporary_chown(path, 0)\n        chown: CommandFilter, chown, root\n\n        # cinder/volume/utils.py: copy_volume(..., ionice='...')\n        ionice_1: ChainingRegExpFilter, ionice, root, ionice, -c[0-3], -n[0-7]\n        ionice_2: ChainingRegExpFilter, ionice, root, ionice, -c[0-3]\n\n        # cinder/volume/utils.py: setup_blkio_cgroup()\n        cgcreate: CommandFilter, cgcreate, root\n        cgset: CommandFilter, cgset, root\n        cgexec: ChainingRegExpFilter, cgexec, root, cgexec, -g, blkio:\\S+\n\n        # cinder/volume/driver.py\n        dmsetup: CommandFilter, dmsetup, root\n        ln: CommandFilter, ln, root\n\n        # cinder/image/image_utils.py\n        qemu-img: EnvFilter, env, root, LC_ALL=C, qemu-img\n        qemu-img_convert: CommandFilter, qemu-img, root\n\n        udevadm: CommandFilter, udevadm, root\n\n        # cinder/volume/driver.py: utils.read_file_as_root()\n        cat: CommandFilter, cat, root\n\n        # cinder/volume/nfs.py\n        stat: CommandFilter, stat, root\n        mount: CommandFilter, mount, root\n        df: CommandFilter, df, root\n        
du: CommandFilter, du, root\n        truncate: CommandFilter, truncate, root\n        chmod: CommandFilter, chmod, root\n        rm: CommandFilter, rm, root\n\n        # cinder/volume/drivers/remotefs.py\n        mkdir: CommandFilter, mkdir, root\n\n        # cinder/volume/drivers/netapp/nfs.py:\n        netapp_nfs_find: RegExpFilter, find, root, find, ^[/]*([^/\\0]+(/+)?)*$, -maxdepth, \\d+, -name, img-cache.*, -amin, \\+\\d+\n\n        # cinder/volume/drivers/glusterfs.py\n        chgrp: CommandFilter, chgrp, root\n        umount: CommandFilter, umount, root\n        fallocate: CommandFilter, fallocate, root\n\n        # cinder/volumes/drivers/hds/hds.py:\n        hus-cmd: CommandFilter, hus-cmd, root\n        hus-cmd_local: CommandFilter, /usr/local/bin/hus-cmd, root\n\n        # cinder/volumes/drivers/hds/hnas_backend.py\n        ssc: CommandFilter, ssc, root\n\n        # cinder/brick/initiator/connector.py:\n        ls: CommandFilter, ls, root\n        tee: CommandFilter, tee, root\n        multipath: CommandFilter, multipath, root\n        multipathd: CommandFilter, multipathd, root\n        systool: CommandFilter, systool, root\n\n        # cinder/volume/drivers/block_device.py\n        blockdev: CommandFilter, blockdev, root\n\n        # cinder/volume/drivers/ibm/gpfs.py\n        # cinder/volume/drivers/tintri.py\n        mv: CommandFilter, mv, root\n\n        # cinder/volume/drivers/ibm/gpfs.py\n        cp: CommandFilter, cp, root\n        mmgetstate: CommandFilter, /usr/lpp/mmfs/bin/mmgetstate, root\n        mmclone: CommandFilter, /usr/lpp/mmfs/bin/mmclone, root\n        mmlsattr: CommandFilter, /usr/lpp/mmfs/bin/mmlsattr, root\n        mmchattr: CommandFilter, /usr/lpp/mmfs/bin/mmchattr, root\n        mmlsconfig: CommandFilter, /usr/lpp/mmfs/bin/mmlsconfig, root\n        mmlsfs: CommandFilter, /usr/lpp/mmfs/bin/mmlsfs, root\n        mmlspool: CommandFilter, /usr/lpp/mmfs/bin/mmlspool, root\n        mkfs: CommandFilter, mkfs, root\n        mmcrfileset: 
CommandFilter, /usr/lpp/mmfs/bin/mmcrfileset, root\n        mmlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmlinkfileset, root\n        mmunlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmunlinkfileset, root\n        mmdelfileset: CommandFilter, /usr/lpp/mmfs/bin/mmdelfileset, root\n        mmcrsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmcrsnapshot, root\n        mmdelsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmdelsnapshot, root\n\n        # cinder/volume/drivers/ibm/gpfs.py\n        # cinder/volume/drivers/ibm/ibmnas.py\n        find_maxdepth_inum: RegExpFilter, find, root, find, ^[/]*([^/\\0]+(/+)?)*$, -maxdepth, \\d+, -ignore_readdir_race, -inum, \\d+, -print0, -quit\n\n        # cinder/brick/initiator/connector.py:\n        aoe-revalidate: CommandFilter, aoe-revalidate, root\n        aoe-discover: CommandFilter, aoe-discover, root\n        aoe-flush: CommandFilter, aoe-flush, root\n\n        # cinder/brick/initiator/linuxscsi.py:\n        sg_scan: CommandFilter, sg_scan, root\n\n        #cinder/backup/services/tsm.py\n        dsmc:CommandFilter,/usr/bin/dsmc,root\n\n        # cinder/volume/drivers/hitachi/hbsd_horcm.py\n        raidqry: CommandFilter, raidqry, root\n        raidcom: CommandFilter, raidcom, root\n        pairsplit: CommandFilter, pairsplit, root\n        paircreate: CommandFilter, paircreate, root\n        pairdisplay: CommandFilter, pairdisplay, root\n        pairevtwait: CommandFilter, pairevtwait, root\n        horcmstart.sh: CommandFilter, horcmstart.sh, root\n        horcmshutdown.sh: CommandFilter, horcmshutdown.sh, root\n        horcmgr: EnvFilter, env, root, HORCMINST=, /etc/horcmgr\n\n        # cinder/volume/drivers/hitachi/hbsd_snm2.py\n        auman: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auman\n        auluref: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluref\n        auhgdef: EnvFilter, env, 
root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgdef\n        aufibre1: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aufibre1\n        auhgwwn: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgwwn\n        auhgmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgmap\n        autargetmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetmap\n        aureplicationvvol: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationvvol\n        auluadd: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluadd\n        auludel: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auludel\n        auluchgsize: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluchgsize\n        auchapuser: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auchapuser\n        autargetdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetdef\n        autargetopt: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetopt\n        autargetini: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetini\n        auiscsi: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auiscsi\n        audppool: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, 
STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/audppool\n        aureplicationlocal: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationlocal\n        aureplicationmon: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationmon\n\n        # cinder/volume/drivers/hgst.py\n        vgc-cluster: CommandFilter, vgc-cluster, root\n\n        # cinder/volume/drivers/vzstorage.py\n        pstorage-mount: CommandFilter, pstorage-mount, root\n        pstorage: CommandFilter, pstorage, root\n        ploop: CommandFilter, ploop, root\n\n        # initiator/connector.py:\n        drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid\n  ceph:\n    override:\n    append:\n    monitors: []\n    admin_keyring: null\n    pools:\n      backup:\n        replication: 3\n        crush_rule: replicated_rule\n        chunk_size: 8\n        app_name: cinder-backup\n      cinder.volumes:\n        replication: 3\n        crush_rule: replicated_rule\n        chunk_size: 8\n        app_name: cinder-volume\n  cinder:\n    DEFAULT:\n      state_path: /var/lib/cinder\n      volume_usage_audit_period: hour\n      resource_query_filters_file: /etc/cinder/resource_filters.json\n      log_config_append: /etc/cinder/logging.conf\n      use_syslog: false\n      use_stderr: true\n      volume_name_template: \"%s\"\n      osapi_volume_workers: 1\n      glance_api_version: 2\n      os_region_name: RegionOne\n      host: cinder-volume-worker\n      # NOTE(portdirect): the bind port should not be defined, and is manipulated\n      # via the endpoints section.\n      osapi_volume_listen_port: null\n      enabled_backends: \"rbd1\"\n      default_volume_type: \"rbd1\"\n      # NOTE(portdirect): \"cinder.backup.drivers.ceph\"  and\n      # \"cinder.backup.drivers.posix\" also supported\n      # NOTE(rchurch): As of Stein, 
drivers by class name are required\n      # - cinder.backup.drivers.swift.SwiftBackupDriver\n      # - cinder.backup.drivers.ceph.CephBackupDriver\n      # - cinder.backup.drivers.posix.PosixBackupDriver\n      backup_driver: \"cinder.backup.drivers.swift.SwiftBackupDriver\"\n      # Backup: Ceph RBD options\n      backup_ceph_conf: \"/etc/ceph/ceph.conf\"\n      backup_ceph_user: cinderbackup\n      backup_ceph_pool: cinder.backups\n      # Backup: Posix options\n      backup_posix_path: /var/lib/cinder/backup\n      auth_strategy: keystone\n      # Internal tenant id\n      internal_project_name: internal_cinder\n      internal_user_name: internal_cinder\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    keystone_authtoken:\n      service_token_roles: service\n      service_token_roles_required: true\n      auth_version: v3\n      auth_type: password\n      memcache_security_strategy: ENCRYPT\n      service_type: volumev3\n    nova:\n      auth_type: password\n      auth_version: v3\n      interface: internal\n    oslo_policy:\n      policy_file: /etc/cinder/policy.yaml\n    oslo_concurrency:\n      lock_path: /var/lock\n    oslo_messaging_notifications:\n      driver: messagingv2\n    oslo_middleware:\n      enable_proxy_headers_parsing: true\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: true\n    coordination:\n      backend_url: file:///var/lib/cinder/coordination\n    service_user:\n      auth_type: password\n      send_service_user_token: true\n  logging:\n    loggers:\n      keys:\n        - root\n        - cinder\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n 
       - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_cinder:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: cinder\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n  rabbitmq:\n    # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones\n    policies:\n      - vhost: \"cinder\"\n        name: \"ha_ttl_cinder\"\n        definition:\n          # mirror messges to other nodes in rmq cluster\n          ha-mode: \"all\"\n          ha-sync-mode: \"automatic\"\n          # 70s\n          message-ttl: 70000\n        priority: 0\n        apply-to: all\n        pattern: '^(?!(amq\\.|reply_)).*'\n\n  backends:\n    # Those options will be written to backends.conf as-is.\n    rbd1:\n      volume_driver: cinder.volume.drivers.rbd.RBDDriver\n      volume_backend_name: rbd1\n      rbd_pool: cinder.volumes\n      rbd_ceph_conf: \"/etc/ceph/ceph.conf\"\n      rbd_flatten_volume_from_snapshot: false\n      report_discard_supported: true\n      
rbd_max_clone_depth: 5\n      rbd_store_chunk_size: 4\n      rados_connect_timeout: -1\n      rbd_user: cinder\n      rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337\n      image_volume_cache_enabled: true\n      image_volume_cache_max_size_gb: 200\n      image_volume_cache_max_count: 50\n  rally_tests:\n    run_tempest: false\n    clean_up: |\n      VOLUMES=$(openstack volume list -f value | grep -e \"^s_rally_\" | awk '{ print $1 }')\n      if [ -n \"$VOLUMES\" ]; then\n        echo $VOLUMES | xargs openstack volume delete\n      fi\n    tests:\n      CinderVolumes.create_and_delete_volume:\n        - args:\n            size: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n        - args:\n            size:\n              max: 5\n              min: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n  resource_filters:\n    volume:\n      - name\n      - status\n      - metadata\n      - bootable\n      - migration_status\n      - availability_zone\n      - group_id\n    backup:\n      - name\n      - status\n      - volume_id\n    snapshot:\n      - name\n      - status\n      - volume_id\n      - metadata\n      - availability_zone\n    group: []\n    group_snapshot:\n      - status\n      - group_id\n    attachment:\n      - volume_id\n      - status\n      - instance_id\n      - attach_status\n    message:\n      - resource_uuid\n      - resource_type\n      - event_id\n      - request_id\n      - message_level\n    pool:\n      - name\n      - volume_type\n    volume_type: []\n  enable_iscsi: false\n  enable_conversion_tmpfs: false\n  conversion_tmpfs_size: \"10Gi\"\n  cinder_api_uwsgi:\n    uwsgi:\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      
exit-on-reload: false\n      hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      procname-prefix-spaced: \"cinder-api:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      wsgi-file: /var/lib/openstack/bin/cinder-wsgi\n      stats: 0.0.0.0:1717\n      stats-http: true\n  db_purge:\n    before: 30\n\nbackup:\n  external_ceph_rbd:\n    enabled: false\n    admin_keyring: null\n    configmap: null\n    conf:\n      global: null\n      osd: null\n  posix:\n    volume:\n      class_name: general\n      size: 10Gi\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - cinder-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - cinder-db-sync\n        - cinder-ks-user\n        - cinder-ks-endpoints\n        - cinder-rabbit-init\n        - cinder-storage-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    backup:\n      jobs:\n        - cinder-db-sync\n        - cinder-ks-user\n        - cinder-ks-endpoints\n        - cinder-rabbit-init\n        - cinder-storage-init\n        - cinder-backup-storage-init\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: volumev3\n    backup_storage_init:\n      jobs: null\n    bootstrap:\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: volumev3\n      pod:\n        - requireSameNode: false\n          labels:\n            application: cinder\n            component: volume\n    clean:\n      jobs: null\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n      
services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - cinder-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_purge:\n      jobs:\n        - cinder-db-sync\n        - cinder-db-init\n        - cinder-ks-user\n        - cinder-ks-endpoints\n        - cinder-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    ks_endpoints:\n      jobs:\n        - cinder-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n        - service: oslo_messaging\n          endpoint: internal\n    scheduler:\n      jobs:\n        - cinder-db-sync\n        - cinder-ks-user\n        - cinder-ks-endpoints\n        - cinder-rabbit-init\n        - cinder-storage-init\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: volumev3\n    storage_init:\n      jobs: null\n    tests:\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: volumev3\n    volume:\n      jobs:\n        - cinder-db-sync\n        - cinder-ks-user\n        - cinder-ks-endpoints\n        - cinder-rabbit-init\n        - cinder-storage-init\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: volumev3\n    volume_usage_audit:\n      jobs:\n        - cinder-db-sync\n        - cinder-ks-user\n        - cinder-ks-endpoints\n        - cinder-rabbit-init\n        - cinder-storage-init\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: 
internal\n          service: volumev3\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    create_internal_tenant:\n      services:\n        - endpoint: internal\n          service: identity\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: cinder-keystone-admin\n    cinder: cinder-keystone-user\n    glance: cinder-keystone-glance\n    nova: cinder-keystone-nova\n    swift: cinder-keystone-swift\n    service: cinder-keystone-service\n    test: cinder-keystone-test\n  oslo_db:\n    admin: cinder-db-admin\n    cinder: cinder-db-user\n  rbd:\n    backup: cinder-backup-rbd-keyring\n    volume: cinder-volume-rbd-keyring\n    volume_external: cinder-volume-external-rbd-keyring\n  oslo_messaging:\n    admin: cinder-rabbitmq-admin\n    cinder: cinder-rabbitmq-user\n  tls:\n    volumev3:\n      api:\n        public: cinder-tls-public\n        internal: cinder-tls-api\n  oci_image_registry:\n    cinder: cinder-oci-image-registry\n\n# We use a different layout of the endpoints here to account for versioning\n# this swaps the service name and type, and should be rolled out to other\n# services.\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      cinder:\n        username: cinder\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: 
password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      cinder:\n        role: admin,service\n        region_name: RegionOne\n        username: cinder\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      glance:\n        role: admin,service\n        region_name: RegionOne\n        username: glance\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      nova:\n        role: admin,service\n        region_name: RegionOne\n        project_name: service\n        username: cinder_nova\n        password: password\n        user_domain_name: service\n        project_domain_name: service\n      swift:\n        role: admin,service\n        region_name: RegionOne\n        project_name: service\n        username: cinder_swift\n        password: password\n        user_domain_name: service\n        project_domain_name: service\n      service:\n        role: admin,service\n        region_name: RegionOne\n        project_name: service\n        username: cinder_service_user\n        password: password\n        user_domain_name: service\n        project_domain_name: service\n      test:\n        role: admin\n        region_name: RegionOne\n        username: cinder-test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  image:\n    name: glance\n    hosts:\n      default: glance-api\n      public: glance\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      
api:\n        default: 9292\n        public: 80\n  volumev3:\n    name: cinderv3\n    hosts:\n      default: cinder-api\n      public: cinder\n    host_fqdn_override:\n      default: null\n      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: '/v3'\n      healthcheck: /healthcheck\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 8776\n        public: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      cinder:\n        username: cinder\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /cinder\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n        secret:\n          tls:\n            internal: rabbitmq-tls-direct\n      cinder:\n        username: cinder\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /cinder\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n 
       default: 11211\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns:\n        default: 53\n        protocol: UDP\n  ingress:\n    namespace: null\n    name: ingress\n    hosts:\n      default: ingress\n    port:\n      ingress:\n        default: 80\n\nnetwork_policy:\n  cinder:\n    ingress:\n      - {}\n    egress:\n      - {}\n\ntls:\n  identity: false\n  oslo_messaging: false\n  oslo_db: false\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  cron_volume_usage_audit: true\n  cron_db_purge: false\n  deployment_api: true\n  deployment_backup: true\n  deployment_scheduler: true\n  deployment_volume: true\n  ingress_api: true\n  job_backup_storage_init: true\n  job_bootstrap: true\n  job_clean: true\n  job_create_internal_tenant: true\n  job_db_init: true\n  job_image_repo_sync: true\n  job_rabbit_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_storage_init: true\n  pdb_api: true\n  pod_rally_test: true\n  pvc_backup: true\n  network_policy: false\n  secret_db: true\n  secret_ingress_tls: true\n  secret_keystone: true\n  secret_ks_etc: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_api: true\n  service_ingress_api: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  # 
    provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "cloudkitty/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Cloudkitty\nname: cloudkitty\nversion: 2025.2.0\nhome: https://docs.openstack.org/cloudkitty/latest/\nicon: https://opendev.org/openstack/cloudkitty/media/branch/master/doc/source/images/cloudkitty-logo.png\nsources:\n  - https://opendev.org/openstack/cloudkitty\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "cloudkitty/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2017 The Openstack-Helm Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "cloudkitty/templates/bin/_cloudkitty-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec uwsgi --ini /etc/cloudkitty/cloudkitty-api-uwsgi.ini\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "cloudkitty/templates/bin/_cloudkitty-processor.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n    exec cloudkitty-processor \\\n        --config-file /etc/cloudkitty/cloudkitty.conf \\\n        --config-dir /etc/cloudkitty/cloudkitty.conf.d\n}\n\nfunction stop () {\n    kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "cloudkitty/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\ncloudkitty-dbsync upgrade\n"
  },
  {
    "path": "cloudkitty/templates/bin/_storage-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\ncloudkitty-storage-init\n"
  },
  {
    "path": "cloudkitty/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: cloudkitty-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  cloudkitty-processor.sh: |\n{{ tuple \"bin/_cloudkitty-processor.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  cloudkitty-api.sh: |\n{{ tuple \"bin/_cloudkitty-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  storage-init.sh: |\n{{ tuple \"bin/_storage-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . 
| indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"cloudkitty.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if empty .Values.conf.cloudkitty.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.cloudkitty.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cloudkitty.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.cloudkitty.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cloudkitty.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.cloudkitty.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.cloudkitty.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.cloudkitty.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.cloudkitty.database.connection)) (empty .Values.conf.cloudkitty.database.connection) -}}\n{{- $connection := tuple \"oslo_db\" \"internal\" \"cloudkitty\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.cloudkitty.database \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.cloudkitty.database \"connection\" $connection -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cloudkitty.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"cloudkitty\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.cloudkitty.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cloudkitty.DEFAULT.os_privileged_user_name -}}\n{{- $_ := set .Values.conf.cloudkitty.DEFAULT \"os_privileged_user_name\" .Values.endpoints.identity.auth.cloudkitty.username }}\n{{- end -}}\n{{- if empty .Values.conf.cloudkitty.DEFAULT.os_privileged_user_password -}}\n{{- $_ := set .Values.conf.cloudkitty.DEFAULT \"os_privileged_user_password\" .Values.endpoints.identity.auth.cloudkitty.password }}\n{{- end -}}\n{{- if empty .Values.conf.cloudkitty.DEFAULT.os_privileged_user_auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.cloudkitty.DEFAULT \"os_privileged_user_auth_url\" }}\n{{- end -}}\n{{- if empty .Values.conf.cloudkitty.DEFAULT.os_privileged_user_tenant -}}\n{{- $_ := set .Values.conf.cloudkitty.DEFAULT \"os_privileged_user_tenant\" .Values.endpoints.identity.auth.cloudkitty.project_name }}\n{{- end -}}\n\n{{- if empty .Values.conf.cloudkitty.DEFAULT.os_region_name -}}\n{{- $_ := set .Values.conf.cloudkitty.DEFAULT \"os_region_name\" .Values.endpoints.identity.auth.cloudkitty.region_name }}\n{{- end -}}\n\n{{- if empty .Values.conf.cloudkitty.DEFAULT.os_user_domain_name -}}\n{{- $_ := set .Values.conf.cloudkitty.DEFAULT \"os_user_domain_name\" .Values.endpoints.identity.auth.cloudkitty.user_domain_name }}\n{{- end -}}\n\n{{- if empty .Values.conf.cloudkitty.DEFAULT.os_project_domain_name -}}\n{{- $_ := set .Values.conf.cloudkitty.DEFAULT \"os_project_domain_name\" .Values.endpoints.identity.auth.cloudkitty.user_domain_name }}\n{{- end -}}\n\n{{- if empty (index .Values.conf.cloudkitty_api_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"rating\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.cloudkitty_api_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $configMapName }}\ntype: Opaque\ndata:\n  cloudkitty.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.cloudkitty | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.paste | b64enc }}\n  metrics.yml: {{ $envAll.Values.conf.processor_metrics | b64enc }}\n  cloudkitty_sudoers: {{ $envAll.Values.conf.cloudkitty_sudoers | b64enc }}\n  cloudkitty-api-uwsgi.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.cloudkitty_api_uwsgi | b64enc }}\n{{- end }}\n{{- end }}\n{{- if .Values.manifests.configmap_etc }}\n{{- list \"cloudkitty-etc\" . | include \"cloudkitty.configmap.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"cloudkittyApiLivenessProbeTemplate\" }}\nhttpGet:\n  scheme: {{ tuple \"rating\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: /\n  port: {{ tuple \"rating\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- define \"cloudkittyApiReadinessProbeTemplate\" }}\nhttpGet:\n  scheme: HTTP\n  path: /\n  port: {{ tuple \"rating\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . 
}}\n\n{{- $mounts_cloudkitty_api := .Values.pod.mounts.cloudkitty_api.cloudkitty_api }}\n{{- $mounts_cloudkitty_api_init := .Values.pod.mounts.cloudkitty_api.init_container }}\n{{- $etcSources := .Values.pod.etcSources.cloudkitty_api }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"cloudkitty-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"cloudkitty-api\" }}\n{{- tuple $envAll \"cloudkitty_api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: cloudkitty-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"cloudkitty\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.cloudkitty_api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"cloudkitty\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"cloudkitty\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"cloudkitty_api\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"cloudkitty-api\" \"containerNames\" (list \"cloudkitty-api-init\" \"cloudkitty-api\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"cloudkitty\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"cloudkitty\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.cloudkitty.node_selector_key }}: {{ .Values.labels.cloudkitty.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.cloudkitty_api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"cloudkitty_api\" $mounts_cloudkitty_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: cloudkitty-api\n{{ tuple $envAll \"cloudkitty_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.cloudkitty_api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cloudkitty\" \"container\" \"cloudkitty_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/cloudkitty-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/cloudkitty-api.sh\n                  - stop\n          ports:\n            - name: c-api\n              containerPort: {{ tuple \"rating\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" $envAll \"component\" \"cloudkitty\" \"container\" \"default\" \"type\" \"liveness\" \"probeTemplate\" (include \"cloudkittyApiLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"cloudkitty\" \"container\" \"default\" \"type\" \"readiness\" \"probeTemplate\" (include \"cloudkittyApiReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.cloudkitty.oslo_concurrency.lock_path }}\n            - name: cloudkitty-bin\n              mountPath: /tmp/cloudkitty-api.sh\n              subPath: cloudkitty-api.sh\n            - name: etccloudkitty\n              mountPath: /etc/cloudkitty\n            - name: cloudkitty-etc\n              mountPath: /etc/cloudkitty/cloudkitty.conf\n              subPath: cloudkitty.conf\n            - name: cloudkitty-etc\n              mountPath: /etc/cloudkitty/metrics.yml\n              subPath: metrics.yml\n            - name: cloudkitty-etc\n              mountPath: /etc/cloudkitty/logging.conf\n              subPath: logging.conf\n            - name: cloudkitty-etc\n              mountPath: /etc/cloudkitty/api-paste.ini\n              subPath: api-paste.ini\n            - name: cloudkitty-etc\n              mountPath: /etc/cloudkitty/cloudkitty-api-uwsgi.ini\n              subPath: cloudkitty-api-uwsgi.ini\n            - name: cloudkitty-etc-snippets\n              mountPath: /etc/cloudkitty/cloudkitty.conf.d/\n              readOnly: true\n{{ if $mounts_cloudkitty_api.volumeMounts }}{{ toYaml $mounts_cloudkitty_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: 
oslo-lock-path\n          emptyDir: {}\n        - name: etccloudkitty\n          emptyDir: {}\n        - name: cloudkitty-bin\n          configMap:\n            name: cloudkitty-bin\n            defaultMode: 0555\n        - name: cloudkitty-etc\n          secret:\n            secretName: cloudkitty-etc\n            defaultMode: 0444\n        - name: cloudkitty-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{ if $mounts_cloudkitty_api.volumes }}{{ toYaml $mounts_cloudkitty_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/deployment-processor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_processor }}\n{{- $envAll := . }}\n\n{{- $mounts_cloudkitty_processor := .Values.pod.mounts.cloudkitty_processor.cloudkitty_processor }}\n{{- $mounts_cloudkitty_processor_init := .Values.pod.mounts.cloudkitty_processor.init_container }}\n{{- $etcSources := .Values.pod.etcSources.cloudkitty_processor }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"cloudkitty-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"cloudkitty-processor\" }}\n{{- tuple $envAll \"cloudkitty_processor\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: cloudkitty-processor\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"cloudkitty\" \"processor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.cloudkitty_processor }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"cloudkitty\" \"processor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"cloudkitty\" \"processor\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"cloudkitty_processor\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"cloudkitty-processor\" \"containerNames\" (list \"cloudkitty-processor-init\" \"cloudkitty-processor\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"cloudkitty-processor\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"cloudkitty\" \"processor\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.cloudkitty.node_selector_key }}: {{ .Values.labels.cloudkitty.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.cloudkitty_processor.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"cloudkitty_processor\" $mounts_cloudkitty_processor_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: cloudkitty-processor\n{{ tuple $envAll \"cloudkitty_processor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.cloudkitty_processor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cloudkitty\" \"container\" \"cloudkitty_processor\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/cloudkitty-processor.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/cloudkitty-processor.sh\n                  - stop\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.cloudkitty.oslo_concurrency.lock_path }}\n            - name: cloudkitty-bin\n              mountPath: /tmp/cloudkitty-processor.sh\n              subPath: cloudkitty-processor.sh\n              readOnly: true\n            - name: etccloudkitty\n              mountPath: /etc/cloudkitty\n            - name: cloudkitty-etc\n              mountPath: /etc/cloudkitty/cloudkitty.conf\n              subPath: cloudkitty.conf\n            - name: cloudkitty-etc\n              mountPath: /etc/cloudkitty/logging.conf\n              subPath: logging.conf\n            - name: cloudkitty-etc\n              mountPath: /etc/cloudkitty/metrics.yml\n              subPath: metrics.yml\n            - name: cloudkitty-etc-snippets\n              mountPath: /etc/cloudkitty/cloudkitty.conf.d/\n              readOnly: true\n{{ if $mounts_cloudkitty_processor.volumeMounts }}{{ toYaml $mounts_cloudkitty_processor.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: etccloudkitty\n          emptyDir: {}\n        - name: cloudkitty-bin\n          configMap:\n            name: cloudkitty-bin\n            defaultMode: 0555\n        - name: cloudkitty-etc\n          secret:\n            secretName: cloudkitty-etc\n            defaultMode: 0444\n        - name: cloudkitty-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- 
else }}\n          emptyDir: {}\n{{ end }}\n{{ if $mounts_cloudkitty_processor.volumes}}{{ toYaml $mounts_cloudkitty_processor.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "cloudkitty/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendServiceType\" \"rating\" \"backendPort\" \"c-api\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"cloudkitty\" \"keystoneUser\" .Values.bootstrap.ks_user \"logConfigFile\" .Values.conf.cloudkitty.DEFAULT.log_config_append -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbToDrop := dict \"inputType\" \"secret\" \"adminSecret\" .Values.secrets.oslo_db.admin \"userSecret\" .Values.secrets.oslo_db.cloudkitty -}}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"cloudkitty\" \"dbToDrop\" $dbToDrop -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"cloudkitty\" -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml ) }}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"cloudkitty\" \"podVolMounts\" .Values.pod.mounts.cloudkitty_db_sync.cloudkitty_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.cloudkitty_db_sync.cloudkitty_db_sync.volumes -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbSyncJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.cloudkitty.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"cloudkitty\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"cloudkitty\" \"serviceTypes\" ( tuple \"rating\" ) -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml ) }}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"cloudkitty\" \"serviceTypes\" ( tuple \"rating\" ) -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml ) }}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"cloudkitty\" -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml ) }}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/job-rabbitmq-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"cloudkitty\" -}}\n{{- $_ := set $rmqUserJob \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) }}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/job-storage-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_storage_init }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"cloudkitty-storage-init\" }}\n{{ tuple $envAll \"storage_init\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: cloudkitty-storage-init\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n    \"helm.sh/hook\": \"post-install,post-upgrade\"\n    \"helm.sh/hook-weight\": \"-4\"\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"cloudkitty\" \"db-migrate\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"storage_init\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: cloudkitty-storage-init\n{{ tuple $envAll \"cloudkitty_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.storage_init | include 
\"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cloudkitty\" \"container\" \"cloudkitty_storage_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/storage-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: cloudkitty-bin\n              mountPath: /tmp/storage-init.sh\n              subPath: storage-init.sh\n            - name: etccloudkitty\n              mountPath: /etc/cloudkitty\n            - name: cloudkitty-etc\n              mountPath: /etc/cloudkitty/cloudkitty.conf\n              subPath: cloudkitty.conf\n            - name: cloudkitty-etc\n              mountPath: /etc/cloudkitty/logging.conf\n              subPath: logging.conf\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: etccloudkitty\n          emptyDir: {}\n        - name: cloudkitty-etc\n          secret:\n            secretName: cloudkitty-etc\n            defaultMode: 0444\n        - name: cloudkitty-bin\n          configMap:\n            name: cloudkitty-bin\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"cloudkitty\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "cloudkitty/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: cloudkitty-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.cloudkitty_api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"cloudkitty\" \"cloudkitty_api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"cloudkitty\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"cloudkitty\" \"test\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/secret-ks-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $envAll := . -}}\n{{/* the endpoints.identity.auth sections with the oslo conf sections they get rendered to */}}\n{{- $ksUsers := dict\n  \"cloudkitty\" \"keystone_authtoken\"\n-}}\n{{ dict\n  \"envAll\" $envAll\n  \"serviceName\" \"cloudkitty\"\n  \"serviceUserSections\" $ksUsers\n  | include \"helm-toolkit.manifests.secret_ks_etc\"\n}}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- $rabbitmqProtocol := \"http\" }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- $rabbitmqProtocol = \"https\" }}\n{{- end }}\n{{- range $key1, $userClass := tuple \"admin\" \"cloudkitty\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass $rabbitmqProtocol $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n  TRANSPORT_URL: {{ tuple \"oslo_messaging\" \"internal\" $userClass \"amqp\" $envAll | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"rating\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: c-api\n      port: {{ tuple \"rating\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"cloudkitty\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "cloudkitty/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    cloudkitty_api: quay.io/airshipit/cloudkitty:2025.1-ubuntu_noble\n    cloudkitty_db_sync: quay.io/airshipit/cloudkitty:2025.1-ubuntu_noble\n    cloudkitty_processor: quay.io/airshipit/cloudkitty:2025.1-ubuntu_noble\n    cloudkitty_storage_init: quay.io/airshipit/cloudkitty:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  cloudkitty:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  processor:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    
node_selector_value: enabled\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      cloudkitty:\n        username: cloudkitty\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  rating:\n    name: cloudkitty\n    hosts:\n      default: cloudkitty-api\n      public: cloudkitty-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: \"\"\n    scheme:\n      default: \"http\"\n    port:\n      api:\n        default: 8089\n        public: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      cloudkitty:\n        username: cloudkitty\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /cloudkitty\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      cloudkitty:\n        role: admin\n        region_name: RegionOne\n        username: cloudkitty\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      test:\n        role: admin\n        region_name: RegionOne\n        username: test\n        password: password\n     
   project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n        secret:\n          tls:\n            internal: rabbitmq-tls-direct\n      cloudkitty:\n        username: cloudkitty\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /cloudkitty\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: \"http\"\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress\n  # They are using to enable the Egress K8s network policy.\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    
scheme: http\n    port:\n      dns:\n        default: 53\n        protocol: UDP\n  ingress:\n    namespace: null\n    name: ingress\n    hosts:\n      default: ingress\n    port:\n      ingress:\n        default: 80\n\nsecrets:\n  identity:\n    admin: cloudkitty-keystone-admin\n    cloudkitty: cloudkitty-keystone-user\n    test: cloudkitty-keystone-test\n  oslo_db:\n    admin: cloudkitty-db-admin\n    cloudkitty: cloudkitty-db-user\n  oslo_messaging:\n    admin: cloudkitty-rabbitmq-admin\n    cloudkitty: cloudkitty-rabbitmq-user\n  oci_image_registry:\n    cloudkitty: cloudkitty-oci-image-registry\n\nbootstrap:\n  enabled: false\n  ks_user: cloudkitty\n  script: |\n    openstack token issue\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - cloudkitty-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n\n  static:\n    cloudkitty_api:\n      jobs:\n        - cloudkitty-db-sync\n        - cloudkitty-storage-init\n        - cloudkitty-ks-user\n        - cloudkitty-ks-endpoints\n        - cloudkitty-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    cloudkitty_processor:\n      jobs:\n        - cloudkitty-db-sync\n        - cloudkitty-storage-init\n        - cloudkitty-ks-user\n        - cloudkitty-ks-endpoints\n        - cloudkitty-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - cloudkitty-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    storage_init:\n      jobs:\n        - cloudkitty-db-sync\n      services:\n        - endpoint: internal\n          service: oslo_db\n    ks_endpoints:\n      jobs:\n        - cloudkitty-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n 
     services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n        - service: oslo_messaging\n          endpoint: internal\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\npod:\n  security_context:\n    cloudkitty:\n      pod:\n        runAsUser: 42424\n      container:\n        cloudkitty_api:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: false\n          runAsUser: 42424\n        cloudkitty_processor:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: false\n          runAsUser: 42424\n        cloudkitty_db_sync:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: false\n          runAsUser: 42424\n    test:\n      pod:\n        runAsUser: 42424\n      container:\n        horizon_test:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  probes:\n    cloudkitty:\n      default:\n        liveness:\n          enabled: true\n          params: {}\n        readiness:\n          enabled: true\n          params: {}\n    cloudkitty-processor:\n      default:\n        liveness:\n          enabled: true\n          params: {}\n        readiness:\n          enabled: true\n          params: {}\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  replicas:\n    cloudkitty_api: 1\n    cloudkitty_processor: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      cloudkitty_api:\n        min_available: 0\n      cloudkitty_processor:\n   
     min_available: 0\n    termination_grace_period:\n      cloudkitty_api:\n        timeout: 30\n      cloudkitty_processor:\n        timeout: 30\n  tolerations:\n    cloudkitty:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  mounts:\n    cloudkitty_api:\n      init_container: null\n      cloudkitty_api:\n        volumeMounts:\n        volumes:\n    cloudkitty_processor:\n      init_container: null\n      cloudkitty_processor:\n        volumeMounts:\n        volumes:\n    cloudkitty_db_sync:\n      cloudkitty_db_sync:\n        volumeMounts:\n        volumes:\n    cloudkitty_db_init:\n      cloudkitty_db_sync:\n        volumeMounts:\n        volumes:\n    cloudkitty_ks_users:\n      cloudkitty_db_sync:\n        volumeMounts:\n        volumes:\n    cloudkitty_ks_service:\n      cloudkitty_db_sync:\n        volumeMounts:\n        volumes:\n  # -- This allows users to add Kubernetes Projected Volumes to be mounted at /etc/cloudkitty/cloudkitty.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/daemonset/cronjob\n  ## https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    cloudkitty_api: []\n    cloudkitty_processor: []\n    cloudkitty_db_sync: []\n  resources:\n    enabled: false\n    cloudkitty_api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    cloudkitty_processor:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        
requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      storage_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\nconf:\n  paste:\n    pipeline:cloudkitty+noauth:\n      pipeline: cors healthcheck http_proxy_to_wsgi request_id ck_api\n    pipeline:cloudkitty+keystone:\n      pipeline: cors healthcheck http_proxy_to_wsgi request_id authtoken ck_api\n    app:ck_api:\n      paste.app_factory: cloudkitty.api.app:app_factory\n    filter:authtoken:\n      acl_public_routes: /, /v1, /v2, /healthcheck\n      paste.filter_factory: cloudkitty.api.middleware:AuthTokenMiddleware.factory\n    filter:request_id:\n      paste.filter_factory: 
oslo_middleware:RequestId.factory\n    filter:cors:\n      paste.filter_factory: oslo_middleware.cors:filter_factory\n      oslo_config_project: cloudkitty\n    filter:healthcheck:\n      paste.filter_factory: oslo_middleware:Healthcheck.factory\n      backends: disable_by_file\n      disable_by_file_path: /etc/cloudkitty/healthcheck_disable\n    filter:http_proxy_to_wsgi:\n      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory\n      oslo_config_project: cloudkitty\n  cloudkitty_api_uwsgi:\n    uwsgi:\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      procname-prefix-spaced: \"cloudkitty-api:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      wsgi-file: /var/lib/openstack/bin/cloudkitty-api\n      processes: 1\n      stats: 0.0.0.0:1717\n      stats-http: true\n  cloudkitty:\n    DEFAULT:\n      log_config_append: /etc/cloudkitty/logging.conf\n      api_paste_config: /etc/cloudkitty/api-paste.ini\n      auth_strategy: keystone\n      debug: false\n    keystone_authtoken:\n      auth_type: password\n      username: cloudkitty\n      service_token_roles_required: true\n      service_token_roles: admin,rating,service\n      service_type: rating\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. 
when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    oslo_concurrency:\n      lock_path: /var/lock\n    collect:\n      collector: gnocchi\n    collector_gnocchi:\n      auth_section: keystone_authtoken\n    fetcher:\n      backend: gnocchi\n    fetcher_gnocchi:\n      auth_section: keystone_authtoken\n    output:\n      pipeline: osrf\n      basepath: /var/cloudkitty/reports\n      backend: cloudkitty.backend.file.FileBackend\n    storage:\n      backend: sqlalchemy\n      version: 1\n  logging:\n    loggers:\n      keys:\n        - root\n        - cloudkitty\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: DEBUG\n      handlers:\n        - stdout\n    logger_cloudkitty:\n      level: DEBUG\n      handlers:\n        - stdout\n      qualname: cloudkitty\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n  
cloudkitty_sudoers: |\n    Defaults secure_path=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin\"\n    cloudkitty ALL=(ALL:ALL) NOPASSWD: /var/lib/openstack/bin/privsep-helper\n  processor_metrics: |\n    metrics:\n      cpu:\n        alt_name: instance\n        extra_args:\n          aggregation_method: mean\n          resource_type: instance\n        groupby:\n          - id\n          - user_id\n          - project_id\n        metadata:\n          - flavor_name\n          - flavor_id\n          - vcpus\n        mutate: NUMBOOL\n        unit: instance\n      image.size:\n        extra_args:\n          aggregation_method: mean\n          resource_type: image\n        factor: 1/1048576\n        groupby:\n          - id\n          - user_id\n          - project_id\n        metadata:\n          - container_format\n          - disk_format\n        unit: MiB\n      ip.floating:\n        extra_args:\n          aggregation_method: mean\n          resource_type: network\n        groupby:\n          - id\n          - user_id\n          - project_id\n        metadata:\n          - state\n        mutate: NUMBOOL\n        unit: ip\n      network.incoming.bytes.rate:\n        extra_args:\n          aggregation_method: mean\n          resource_type: instance_network_interface\n        factor: 3600/1000000\n        groupby:\n          - id\n          - project_id\n          - user_id\n        metadata:\n          - instance_id\n        unit: MB\n      network.outgoing.bytes.rate:\n        extra_args:\n          aggregation_method: mean\n          resource_type: instance_network_interface\n        factor: 3600/1000000\n        groupby:\n          - id\n          - project_id\n          - user_id\n        metadata:\n          - instance_id\n        unit: MB\n      radosgw.objects.size:\n        extra_args:\n          aggregation_method: mean\n          resource_type: ceph_account\n        factor: 1/1073741824\n        groupby:\n      
    - id\n          - user_id\n          - project_id\n        unit: GiB\n      volume.size:\n        extra_args:\n          aggregation_method: mean\n          resource_type: volume\n        groupby:\n          - id\n          - user_id\n          - project_id\n        metadata:\n          - volume_type\n        unit: GiB\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    node_port:\n      enabled: false\n      port: 33053\n\nnetwork_policy:\n  cloudkitty:\n    ingress:\n      - from:\n        - podSelector:\n            matchLabels:\n              application: cloudkitty\n        - podSelector:\n            matchLabels:\n              application: horizon\n        - podSelector:\n            matchLabels:\n              application: ingress\n        - podSelector:\n            matchLabels:\n              application: gnocchi\n        ports:\n        - protocol: TCP\n          port: 80\n        - protocol: TCP\n          port: 8089\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  deployment_api: true\n  deployment_processor: true\n  ingress_api: true\n  job_bootstrap: true\n  job_ks_user: true\n  job_db_sync: true\n  job_db_init: true\n  job_db_drop: false\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_rabbit_init: true\n  job_storage_init: true\n  pdb_api: true\n  network_policy: false\n  secret_db: true\n  secret_rabbitmq: true\n  secret_keystone: true\n  secret_registry: true\n  service_api: true\n  secret_ks_etc: true\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       
objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "cyborg/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Cyborg\nname: cyborg\nversion: 2025.2.0\nhome: https://docs.openstack.org/cyborg\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Cyborg/OpenStack_Project_Cyborg_vertical.png\nsources:\n  - https://opendev.org/openstack/cyborg\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "cyborg/templates/bin/_cyborg-agent.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\n\nexec cyborg-agent \\\n      --config-file /etc/cyborg/cyborg.conf \\\n      --log-config-append  /tmp/logging-cyborg.conf\n"
  },
  {
    "path": "cyborg/templates/bin/_cyborg-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\n\nfunction start () {\n  exec cyborg-api \\\n        --config-file /etc/cyborg/cyborg.conf \\\n        --log-config-append  /tmp/logging-cyborg.conf \\\n        ${OPTIONS}\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "cyborg/templates/bin/_cyborg-conductor.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\n\nexec cyborg-conductor \\\n      --config-file /etc/cyborg/cyborg.conf \\\n      --log-config-append  /tmp/logging-cyborg.conf\n"
  },
  {
    "path": "cyborg/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\ncyborg-dbsync --config-file /etc/cyborg/cyborg.conf upgrade\n"
  },
  {
    "path": "cyborg/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: cyborg-bin\ndata:\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  cyborg-api.sh: |\n{{ tuple \"bin/_cyborg-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  cyborg-conductor.sh: |\n{{ tuple \"bin/_cyborg-conductor.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  cyborg-agent.sh: |\n{{ tuple \"bin/_cyborg-agent.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"cyborg.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if empty .Values.conf.cyborg.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.cyborg.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.cyborg.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.cyborg.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.cyborg.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.cyborg.keystone_authtoken \"password\" .Values.endpoints.identity.auth.cyborg.password -}}\n{{- $_ := set .Values.conf.cyborg.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.cyborg.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.cyborg.keystone_authtoken \"password\" .Values.endpoints.identity.auth.cyborg.password -}}\n{{- $_ := set .Values.conf.cyborg.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.cyborg.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.cyborg.keystone_authtoken \"password\" .Values.endpoints.identity.auth.cyborg.password -}}\n{{- $_ := set .Values.conf.cyborg.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.cyborg.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.cyborg.keystone_authtoken \"password\" .Values.endpoints.identity.auth.cyborg.password -}}\n{{- $_ := set .Values.conf.cyborg.keystone_authtoken \"username\" .Values.endpoints.identity.auth.cyborg.username -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.cyborg.keystone_authtoken \"password\" .Values.endpoints.identity.auth.cyborg.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cyborg.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" 
\"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.cyborg.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.cyborg.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.cyborg.database.connection)) (empty .Values.conf.cyborg.database.connection) -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"cyborg\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\"| set .Values.conf.cyborg.database \"connection\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cyborg.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"cyborg\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.cyborg.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cyborg.placement.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.cyborg.placement \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.placement.project_name -}}\n{{- $_ := set .Values.conf.cyborg.placement \"project_name\" .Values.endpoints.identity.auth.placement.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.placement.project_domain_name -}}\n{{- $_ := set .Values.conf.cyborg.placement \"project_domain_name\" .Values.endpoints.identity.auth.placement.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.placement.user_domain_name -}}\n{{- $_ := set .Values.conf.cyborg.placement \"user_domain_name\" .Values.endpoints.identity.auth.placement.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.placement.username -}}\n{{- $_ := set .Values.conf.cyborg.placement \"username\" .Values.endpoints.identity.auth.placement.username -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.placement.password -}}\n{{- $_ := set .Values.conf.cyborg.placement \"password\" .Values.endpoints.identity.auth.placement.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cyborg.nova.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.cyborg.nova \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.nova.project_name -}}\n{{- $_ := set .Values.conf.cyborg.nova \"project_name\" .Values.endpoints.identity.auth.nova.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.nova.project_domain_name -}}\n{{- $_ := set .Values.conf.cyborg.nova \"project_domain_name\" .Values.endpoints.identity.auth.nova.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.nova.user_domain_name -}}\n{{- $_ := set .Values.conf.cyborg.nova \"user_domain_name\" .Values.endpoints.identity.auth.nova.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.nova.username -}}\n{{- $_ := set .Values.conf.cyborg.nova \"username\" .Values.endpoints.identity.auth.nova.username -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.nova.password -}}\n{{- $_ := set .Values.conf.cyborg.nova \"password\" .Values.endpoints.identity.auth.nova.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.cyborg.service_catalog.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.cyborg.service_catalog \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.service_catalog.project_name -}}\n{{- $_ := set .Values.conf.cyborg.service_catalog \"project_name\" .Values.endpoints.identity.auth.cyborg.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.service_catalog.project_domain_name -}}\n{{- $_ := set .Values.conf.cyborg.service_catalog \"project_domain_name\" .Values.endpoints.identity.auth.cyborg.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.service_catalog.user_domain_name -}}\n{{- $_ := set .Values.conf.cyborg.service_catalog \"user_domain_name\" .Values.endpoints.identity.auth.cyborg.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.service_catalog.username -}}\n{{- $_ := set .Values.conf.cyborg.service_catalog \"username\" .Values.endpoints.identity.auth.cyborg.username -}}\n{{- end -}}\n{{- if empty .Values.conf.cyborg.service_catalog.password -}}\n{{- $_ := set .Values.conf.cyborg.service_catalog \"password\" .Values.endpoints.identity.auth.cyborg.password -}}\n{{- end -}}\n\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $configMapName }}\ntype: Opaque\ndata:\n  cyborg.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.cyborg | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.paste | b64enc }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- list \"cyborg-etc\" . | include \"cyborg.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/daemonset-agent.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"cyborg.agent.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 3 }}\n{{- with $envAll }}\n\n{{- $mounts_cyborg := .Values.pod.mounts.cyborg_agent.cyborg_agent }}\n{{- $mounts_cyborg_agent_init := .Values.pod.mounts.cyborg_agent.init_container }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: cyborg-agent\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll $daemonset | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"cyborg_agent\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.agent.node_selector_key }}: {{ .Values.labels.agent.node_selector_value }}\n      hostNetwork: true\n      hostPID: true\n      hostIPC: true\n      dnsPolicy: ClusterFirstWithHostNet\n      initContainers:\n{{ tuple $envAll \"cyborg\" $mounts_cyborg_agent_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: cyborg-agent\n{{ tuple $envAll \"cyborg_agent\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            runAsUser: 0\n            privileged: true\n          env:\n            - name: HOST_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.hostIP\n          command:\n            - /tmp/cyborg-agent.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.cyborg.oslo_concurrency.lock_path }}\n            - name: cyborg-bin\n              mountPath: /tmp/cyborg-agent.sh\n              subPath: cyborg-agent.sh\n              readOnly: true\n            - name: cyborg-etc\n              mountPath: /etc/cyborg/cyborg.conf\n              subPath: cyborg.conf\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n        - name: cyborg-bin\n          configMap:\n            name: cyborg-bin\n            defaultMode: 0555\n        - name: cyborg-etc\n          secret:\n            secretName: cyborg-etc\n            
defaultMode: 0444\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_agent }}\n{{- $envAll := . }}\n{{- $daemonset := \"agent\" }}\n{{- $configMapName := \"cyborg-etc\" }}\n{{- $serviceAccountName := \"cyborg-agent\" }}\n\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"agent\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"cyborg.agent.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"cyborg.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . }}\n\n{{- $mounts_cyborg_api := .Values.pod.mounts.cyborg_api.cyborg_api }}\n{{- $mounts_cyborg_api_init := .Values.pod.mounts.cyborg_api.init_container }}\n\n{{- $serviceAccountName := \"cyborg-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: cyborg-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"cyborg\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"cyborg\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"cyborg\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"cyborg_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"cyborg-api\" \"containerNames\" (list \"cyborg\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"cyborg\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"cyborg\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_cyborg_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: cyborg\n{{ tuple $envAll \"cyborg_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cyborg\" \"container\" \"cyborg\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n          - name: HOST_IP\n            valueFrom:\n              fieldRef:\n                apiVersion: v1\n                fieldPath: status.hostIP\n          command:\n            - /tmp/cyborg-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/cyborg-api.sh\n                  - stop\n          ports:\n            - name: cyborg-api\n              containerPort: {{ tuple \"accelerator\" 
\"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"accelerator\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          livenessProbe:\n            tcpSocket:\n              port: {{ tuple \"accelerator\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.cyborg.oslo_concurrency.lock_path }}\n            - name: pod-var-cyborg\n              mountPath: /var/lib/cyborg\n            - name: cyborg-bin\n              mountPath: /tmp/cyborg-api.sh\n              subPath: cyborg-api.sh\n              readOnly: true\n            - name: cyborg-etc\n              mountPath: /etc/cyborg/cyborg.conf\n              subPath: cyborg.conf\n              readOnly: true\n            - name: cyborg-etc\n              mountPath: /etc/cyborg/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: cyborg-etc\n              mountPath: /etc/cyborg/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{ if $mounts_cyborg_api.volumeMounts }}{{ toYaml $mounts_cyborg_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-cyborg\n          emptyDir: {}\n        - name: cyborg-bin\n          configMap:\n            name: cyborg-bin\n            defaultMode: 0555\n        - name: cyborg-etc\n          secret:\n            secretName: cyborg-etc\n            defaultMode: 0444\n{{ if $mounts_cyborg_api.volumes}}{{ toYaml $mounts_cyborg_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/deployment-conductor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_conductor }}\n{{- $envAll := . }}\n\n{{- $mounts_cyborg_conductor := .Values.pod.mounts.cyborg_conductor.cyborg_conductor }}\n{{- $mounts_cyborg_conductor_init := .Values.pod.mounts.cyborg_conductor.init_container }}\n\n{{- $serviceAccountName := \"cyborg-conductor\" }}\n{{ tuple $envAll \"conductor\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: cyborg-conductor\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"cyborg\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.conductor }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"cyborg\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"cyborg\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"cyborg_conductor\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"cyborg-conductor\" \"containerNames\" (list \"cyborg-conductor\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"cyborg\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"cyborg\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.conductor.node_selector_key }}: {{ .Values.labels.conductor.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"conductor\" $mounts_cyborg_conductor_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: cyborg-conductor\n{{ tuple $envAll \"cyborg_conductor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.conductor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cyborg\" \"container\" \"cyborg_conductor\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: HOST_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.hostIP\n          command:\n            - /tmp/cyborg-conductor.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.cyborg.oslo_concurrency.lock_path 
}}\n            - name: cyborg-bin\n              mountPath: /tmp/cyborg-conductor.sh\n              subPath: cyborg-conductor.sh\n              readOnly: true\n            - name: cyborg-etc\n              mountPath: /etc/cyborg/cyborg.conf\n              subPath: cyborg.conf\n              readOnly: true\n            - name: cyborg-etc\n              mountPath: /etc/cyborg/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{ if $mounts_cyborg_conductor.volumeMounts }}{{ toYaml $mounts_cyborg_conductor.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: cyborg-bin\n          configMap:\n            name: cyborg-bin\n            defaultMode: 0555\n        - name: cyborg-etc\n          secret:\n            secretName: cyborg-etc\n            defaultMode: 0444\n{{ if $mounts_cyborg_conductor.volumes }}{{ toYaml $mounts_cyborg_conductor.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "cyborg/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendServiceType\" \"accelerator\" \"backendPort\" \"cyborg-api\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"cyborg\" -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"cyborg\" -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"cyborg\" -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"cyborg\" \"serviceTypes\" ( tuple \"accelerator\" ) -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"cyborg\" \"serviceTypes\" ( tuple \"accelerator\" ) \"configMapBin\" \"cyborg-bin\" -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"cyborg\" -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"cyborg\" -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"cyborg\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "cyborg/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: cyborg-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"cyborg\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"cyborg\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"cyborg\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"cyborg\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass \"http\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"accelerator\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: cyborg-api\n      port: {{ tuple \"accelerator\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n      {{ end }}\n  selector:\n{{ tuple $envAll \"cyborg\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n\n\n"
  },
  {
    "path": "cyborg/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"accelerator\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "cyborg/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    cyborg_db_sync: quay.io/airshipit/cyborg:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    cyborg_api: quay.io/airshipit/cyborg:2025.1-ubuntu_noble\n    cyborg_conductor: quay.io/airshipit/cyborg:2025.1-ubuntu_noble\n    cyborg_agent: quay.io/airshipit/cyborg:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy'\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  conductor:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  agent:\n    node_selector_key: openstack-compute-node\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  
local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      cyborg:\n        username: cyborg\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  accelerator:\n    name: cyborg\n    hosts:\n      default: cyborg-api\n      admin: cyborg\n      public: cyborg\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v2\n    scheme:\n      default: http\n    port:\n      api:\n        default: 6666\n        admin: 80\n        public: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      cyborg:\n        username: cyborg\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /cyborg\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      cyborg:\n        role: admin\n        region_name: RegionOne\n        username: cyborg\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      placement:\n        role: admin\n        region_name: RegionOne\n        username: placement\n        password: password\n        project_name: service\n        user_domain_name: service\n        
project_domain_name: service\n      nova:\n        role: admin\n        region_name: RegionOne\n        username: nova\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      test:\n        role: admin\n        region_name: RegionOne\n        username: neutron-test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n        secret:\n          tls:\n            internal: rabbitmq-tls-direct\n      cyborg:\n        username: cyborg\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /cyborg\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 
24224\n      metrics:\n        default: 24220\n  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress\n  # They are used to enable the Egress K8s network policy.\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns:\n        default: 53\n        protocol: UDP\n  ingress:\n    namespace: null\n    name: ingress\n    hosts:\n      default: ingress\n    port:\n      ingress:\n        default: 80\n\nsecrets:\n  identity:\n    admin: cyborg-keystone-admin\n    cyborg: cyborg-keystone-user\n    test: cyborg-keystone-test\n  oslo_db:\n    admin: cyborg-db-admin\n    cyborg: cyborg-db-user\n  oslo_messaging:\n    admin: cyborg-rabbitmq-admin\n    cyborg: cyborg-rabbitmq-user\n  oci_image_registry:\n    cyborg: cyborg-oci-image-registry\n\ndependencies:\n  static:\n    api:\n      jobs:\n        - cyborg-db-sync\n        - cyborg-ks-user\n        - cyborg-ks-endpoints\n        - cyborg-ks-service\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n    conductor:\n      jobs:\n        - cyborg-db-sync\n        - cyborg-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    agent:\n      jobs:\n        - cyborg-db-sync\n        - cyborg-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: placement\n    db_drop:\n      services:\n        - endpoint: internal\n          service: 
oslo_db\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - cyborg-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    ks_endpoints:\n      jobs:\n        - cyborg-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n\npod:\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n        conductor: requiredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  mounts:\n    cyborg_api:\n      init_container: null\n      cyborg_api:\n        volumeMounts:\n        volumes:\n    cyborg_conductor:\n      init_container: null\n      cyborg_conductor:\n        volumeMounts:\n        volumes:\n    cyborg_agent:\n      init_container: null\n      cyborg_agent:\n        volumeMounts:\n        volumes:\n    cyborg_db_sync:\n      cyborg_db_sync:\n        volumeMounts:\n          - name: db-sync-sh\n            mountPath: /tmp/env.py\n            subPath: env.py\n            readOnly: true\n        volumes:\n  replicas:\n    api: 3\n    conductor: 3\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        cyborg:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n    disruption_budget:\n      api:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  resources:\n    enabled: true\n    api:\n      requests:\n        memory: 
\"128Mi\"\n      limits:\n        memory: \"1024Mi\"\n    conductor:\n      requests:\n        memory: \"128Mi\"\n      limits:\n        memory: \"1024Mi\"\n    agent:\n      requests:\n        memory: \"128Mi\"\n      limits:\n        memory: \"1024Mi\"\n    jobs:\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nconf:\n  paste:\n    pipeline:main:\n      pipeline: cors request_id authtoken api_v2\n    app:api_v2:\n      paste.app_factory: cyborg.api.app:app_factory\n    filter:authtoken:\n      acl_public_routes: /, /v2\n      paste.filter_factory: 
cyborg.api.middleware.auth_token:AuthTokenMiddleware.factory\n    filter:osprofiler:\n      paste.filter_factory: cyborg.common.profiler:WsgiMiddleware.factory\n    filter:request_id:\n      paste.filter_factory: oslo_middleware:RequestId.factory\n    filter:cors:\n      paste.filter_factory: oslo_middleware.cors:filter_factory\n      oslo_config_project: cyborg\n\n  policy: {}\n  cyborg:\n    DEFAULT:\n      use_syslog: false\n      state_path: /var/lib/cyborg\n      debug: true\n    api:\n      host_ip: 0.0.0.0\n      api_workers: 3\n    database:\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    service_catalog:\n      auth_type: password\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: true\n      amqp_durable_queues: true\n    oslo_concurrency:\n      lock_path: /var/lock\n    placement:\n      auth_type: password\n    nova:\n      auth_type: password\n    keystone_authtoken:\n      auth_type: password\n      endpoint_type: internal\n      www_authenticate_uri: null\n      service_type: accelerator\n    agent:\n      enabled_drivers:\n        - nvidia_gpu_driver\n    gpu_devices:\n      enabled_vgpu_types: []\n    cyborg_sys_admin:\n      helper_command: /var/lib/openstack/bin/privsep-helper\n  rabbitmq:\n    policies:\n      - vhost: \"cyborg\"\n        name: \"ha_ttl_cyborg\"\n        definition:\n          ha-mode: \"all\"\n          ha-sync-mode: \"automatic\"\n          message-ttl: 70000\n        priority: 0\n        apply-to: all\n        pattern: '^(?!(amq\\.|reply_)).*'\n\nnetwork:\n  api:\n    port: 6666\n    istio:\n      public: true\n    ingress:\n      public: false\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      
annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    node_port:\n      enabled: false\n      port: 30666\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  daemonset_agent: true\n  deployment_api: true\n  deployment_conductor: true\n  ingress_api: true\n  job_db_drop: false\n  job_db_init: true\n  job_db_sync: true\n  job_image_repo_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_rabbit_init: true\n  pdb_api: true\n  network_policy: false\n  secret_db: true\n  secret_keystone: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_ingress_api: false\n  service_api: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "designate/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Designate\nname: designate\nversion: 2025.2.0\nhome: https://docs.openstack.org/designate/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Designate/OpenStack_Project_Designate_vertical.jpg\nsources:\n  - https://opendev.org/openstack/designate\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "designate/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "designate/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\n\ndesignate-manage database sync\n\n"
  },
  {
    "path": "designate/templates/bin/_designate-api.sh.tpl",
    "content": "#!/bin/bash\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec uwsgi --ini /etc/designate/designate-api-uwsgi.ini\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "designate/templates/bin/_designate-central.sh.tpl",
    "content": "#!/bin/bash\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec designate-central \\\n      --config-file /etc/designate/designate.conf \\\n      --config-dir /etc/designate/designate.conf.d\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "designate/templates/bin/_designate-mdns.sh.tpl",
    "content": "#!/bin/bash\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  designate-mdns \\\n      --config-file /etc/designate/designate.conf \\\n      --config-dir /etc/designate/designate.conf.d\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "designate/templates/bin/_designate-producer.sh.tpl",
    "content": "#!/bin/bash\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  designate-producer \\\n      --config-file /etc/designate/designate.conf \\\n      --config-dir /etc/designate/designate.conf.d\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "designate/templates/bin/_designate-service-cleaner.sh.tpl",
    "content": "#!/bin/bash\n# Copyright (c) 2025 VEXXHOST, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nset -ex\n\ndesignate-manage \\\n  --config-file /etc/designate/designate.conf \\\n  --config-dir /etc/designate/designate.conf.d \\\n  service clean\n"
  },
  {
    "path": "designate/templates/bin/_designate-sink.sh.tpl",
    "content": "#!/bin/bash\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\nCOMMAND=\"${@:-start}\"\n  exec designate-sink \\\n      --config-file /etc/designate/designate.conf \\\n      --config-dir /etc/designate/designate.conf.d\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "designate/templates/bin/_designate-worker.sh.tpl",
    "content": "#!/bin/bash\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  designate-worker \\\n      --config-file /etc/designate/designate.conf \\\n      --config-dir /etc/designate/designate.conf.d\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "designate/templates/configmap-bin.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.configmap_bin }}\n\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: designate-bin\ndata:\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |+\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  ks-service.sh: |+\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |+\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |+\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  designate-api.sh: |\n{{ tuple \"bin/_designate-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  designate-central.sh: |\n{{ tuple \"bin/_designate-central.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  designate-mdns.sh: |\n{{ tuple \"bin/_designate-mdns.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  designate-worker.sh: |\n{{ tuple \"bin/_designate-worker.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  designate-producer.sh: |\n{{ tuple \"bin/_designate-producer.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  designate-sink.sh: |\n{{ tuple \"bin/_designate-sink.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  designate-service-cleaner.sh: |\n{{ tuple \"bin/_designate-service-cleaner.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n\n{{- end }}\n"
  },
  {
    "path": "designate/templates/configmap-etc.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.configmap_etc }}\n\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.designate.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.designate.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.designate.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.designate.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.designate.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.designate.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.designate.region_name -}}\n{{- end -}}\n\n{{- if empty .Values.conf.designate.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.designate.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.designate.project_name -}}\n{{- end -}}\n\n{{- if empty .Values.conf.designate.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.designate.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.designate.project_domain_name -}}\n{{- end -}}\n\n{{- if empty .Values.conf.designate.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.designate.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.designate.user_domain_name -}}\n{{- end -}}\n\n{{- if empty .Values.conf.designate.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.designate.keystone_authtoken \"username\" .Values.endpoints.identity.auth.designate.username -}}\n{{- end -}}\n\n{{- if empty .Values.conf.designate.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.designate.keystone_authtoken \"password\" .Values.endpoints.identity.auth.designate.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.designate.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.designate.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.designate.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.designate.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" (index .Values.conf.designate \"storage:sqlalchemy\").connection)) (empty (index .Values.conf.designate \"storage:sqlalchemy\").connection) -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"designate\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | set (index .Values.conf.designate \"storage:sqlalchemy\") \"connection\" -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"designate\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | set .Values.conf.designate.database \"connection\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.designate.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"designate\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.designate.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty (index .Values.conf.designate \"service:api\").api_base_uri -}}\n{{- $_ := tuple \"dns\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set (index .Values.conf.designate \"service:api\") \"api_base_uri\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.designate_api_uwsgi.uwsgi.processes -}}\n{{- $_ := set .Values.conf.designate_api_uwsgi.uwsgi \"processes\" (index .Values.conf.designate \"service:api\").workers -}}\n{{- end -}}\n{{- if empty (index .Values.conf.designate_api_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"dns\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.designate_api_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: designate-etc\ntype: Opaque\ndata:\n  designate.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.designate | b64enc }}\n  designate-api-uwsgi.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.designate_api_uwsgi | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.paste | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.pools \"key\" \"pools.yaml\" \"format\" \"Secret\" ) | indent 2 }}\n\n{{- end }}\n"
  },
  {
    "path": "designate/templates/cron-job-service.cleaner.yaml",
    "content": "# Copyright (c) 2025 VEXXHOST, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\n{{- if .Values.manifests.cron_job_service_cleaner }}\n{{- $envAll := . }}\n\n{{- $mounts_designate_service_cleaner := .Values.pod.mounts.designate_service_cleaner.designate_service_cleaner }}\n{{- $mounts_designate_service_cleaner_init := .Values.pod.mounts.designate_service_cleaner.init_container }}\n{{- $etcSources := .Values.pod.etcSources.designate_service_cleaner }}\n\n{{- $serviceAccountName := \"designate-service-cleaner\" }}\n{{ tuple $envAll \"service_cleaner\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: designate-service-cleaner\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  schedule: {{ .Values.jobs.service_cleaner.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.service_cleaner.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.service_cleaner.history.failed }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"designate\" \"service-cleaner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"designate-service-cleaner\" \"containerNames\" (list \"init\" \"designate-service-cleaner\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"designate\" \"service-cleaner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n          annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 12 }}\n{{ dict \"envAll\" $envAll \"podName\" \"designate-service-cleaner\" \"containerNames\" (list \"init\" \"designate-service-cleaner\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n        spec:\n{{ tuple \"designate_service_cleaner\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"designate_service_cleaner\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n          serviceAccountName: {{ $serviceAccountName }}\n          restartPolicy: OnFailure\n          nodeSelector:\n            {{ .Values.labels.service_cleaner.node_selector_key }}: {{ .Values.labels.service_cleaner.node_selector_value }}\n          initContainers:\n{{ tuple $envAll \"service_cleaner\" $mounts_designate_service_cleaner_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          containers:\n            - name: designate-service-cleaner\n{{ tuple $envAll \"designate_service_cleaner\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.service_cleaner | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n              command:\n                - /tmp/designate-service-cleaner.sh\n              volumeMounts:\n                - name: designate-bin\n                  mountPath: 
/tmp/designate-service-cleaner.sh\n                  subPath: designate-service-cleaner.sh\n                  readOnly: true\n                - name: pod-etc-designate\n                  mountPath: /etc/designate\n                - name: pod-var-cache-designate\n                  mountPath: /var/cache/designate\n                - name: designate-etc\n                  mountPath: /etc/designate/designate.conf\n                  subPath: designate.conf\n                  readOnly: true\n                - name: designate-etc-snippets\n                  mountPath: /etc/designate/designate.conf.d/\n                  readOnly: true\n                - name: designate-etc\n                  mountPath: /etc/designate/api-paste.ini\n                  subPath: api-paste.ini\n                  readOnly: true\n                - name: designate-etc\n                  mountPath: /etc/designate/policy.yaml\n                  subPath: policy.yaml\n                  readOnly: true\n                {{- if .Values.conf.designate.DEFAULT.log_config_append }}\n                - name: designate-etc\n                  mountPath: {{ .Values.conf.designate.DEFAULT.log_config_append }}\n                  subPath: {{ base .Values.conf.designate.DEFAULT.log_config_append }}\n                  readOnly: true\n                {{- end }}\n{{ if $mounts_designate_service_cleaner.volumeMounts }}{{ toYaml $mounts_designate_service_cleaner.volumeMounts | indent 16 }}{{ end }}\n          volumes:\n            - name: pod-etc-designate\n              emptyDir: {}\n            - name: pod-var-cache-designate\n              emptyDir: {}\n            - name: designate-bin\n              configMap:\n                name: designate-bin\n                defaultMode: 0555\n            - name: designate-etc\n              secret:\n                secretName: designate-etc\n                defaultMode: 0444\n            - name: service-etc-snippets\n{{- if $etcSources }}\n              projected:\n              
  sources:\n{{ toYaml $etcSources | indent 18 }}\n{{- else }}\n              emptyDir: {}\n{{ end }}\n{{ if $mounts_designate_service_cleaner.volumes }}{{ toYaml $mounts_designate_service_cleaner.volumes | indent 12 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/deployment-api.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.deployment_api }}\n\n{{- $envAll := . }}\n{{- $mounts_designate_api := .Values.pod.mounts.designate_api.designate_api }}\n{{- $mounts_designate_api_init := .Values.pod.mounts.designate_api.init_container }}\n{{- $etcSources := .Values.pod.etcSources.designate_api }}\n\n{{- $serviceAccountName := \"designate-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: designate-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"designate\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"designate\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"designate\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"designate_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ tuple \"designate_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"designate_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"designate\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"designate\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_designate_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: designate-api\n{{ tuple $envAll \"designate_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"designate\" \"container\" \"designate_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/designate-api.sh\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/designate-api.sh\n                  - stop\n          ports:\n            - name: dns-api\n              containerPort: {{ tuple \"dns\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            httpGet:\n              scheme: {{ tuple \"dns\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n              path: /\n              port: {{ tuple \"dns\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.designate.oslo_concurrency.lock_path }}\n            - name: designate-bin\n              mountPath: /tmp/designate-api.sh\n              subPath: designate-api.sh\n              readOnly: true\n            - name: pod-etc-designate\n              mountPath: /etc/designate\n            - name: pod-var-cache-designate\n              mountPath: /var/cache/designate\n            - name: designate-etc\n              mountPath: /etc/designate/designate.conf\n              subPath: designate.conf\n              readOnly: true\n            - name: designate-etc-snippets\n              mountPath: /etc/designate/designate.conf.d/\n              readOnly: true\n            - name: designate-etc\n              mountPath: /etc/designate/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: designate-etc\n              mountPath: /etc/designate/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: designate-etc\n              mountPath: /etc/designate/designate-api-uwsgi.ini\n              subPath: designate-api-uwsgi.ini\n              readOnly: true\n            {{- if .Values.conf.designate.DEFAULT.log_config_append }}\n            - name: designate-etc\n              mountPath: {{ .Values.conf.designate.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.designate.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{ if 
$mounts_designate_api.volumeMounts }}{{ toYaml $mounts_designate_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-designate\n          emptyDir: {}\n        - name: pod-var-cache-designate\n          emptyDir: {}\n        - name: designate-bin\n          configMap:\n            name: designate-bin\n            defaultMode: 0555\n        - name: designate-etc\n          secret:\n            secretName: designate-etc\n            defaultMode: 0444\n        - name: designate-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{ if $mounts_designate_api.volumes }}{{ toYaml $mounts_designate_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/deployment-central.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.deployment_central }}\n\n{{- $envAll := . }}\n{{- $mounts_designate_central := .Values.pod.mounts.designate_central.designate_central }}\n{{- $mounts_designate_central_init := .Values.pod.mounts.designate_central.init_container }}\n{{- $etcSources := .Values.pod.etcSources.designate_central }}\n\n{{- $serviceAccountName := \"designate-central\" }}\n{{ tuple $envAll \"central\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: designate-central\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"designate\" \"central\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.central }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"designate\" \"central\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"designate\" \"central\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple 
\"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"designate_central\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ tuple \"designate_central\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"designate_central\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"designate\" \"central\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.central.node_selector_key }}: {{ .Values.labels.central.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"central\" $mounts_designate_central_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: designate-central\n{{ tuple $envAll \"designate_central\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.central | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"designate\" \"container\" \"designate_central\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - bash\n            - /tmp/designate-central.sh\n          volumeMounts:\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.designate.oslo_concurrency.lock_path }}\n            - name: designate-bin\n              mountPath: /tmp/designate-central.sh\n              subPath: designate-central.sh\n              readOnly: true\n            - name: pod-etc-designate\n              mountPath: /etc/designate\n            - name: pod-var-cache-designate\n              
mountPath: /var/cache/designate\n            - name: designate-etc\n              mountPath: /etc/designate/designate.conf\n              subPath: designate.conf\n              readOnly: true\n            - name: designate-etc-snippets\n              mountPath: /etc/designate/designate.conf.d/\n              readOnly: true\n            - name: designate-etc\n              mountPath: /etc/designate/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: designate-etc\n              mountPath: /etc/designate/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            {{- if .Values.conf.designate.DEFAULT.log_config_append }}\n            - name: designate-etc\n              mountPath: {{ .Values.conf.designate.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.designate.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{ if $mounts_designate_central.volumeMounts }}{{ toYaml $mounts_designate_central.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-designate\n          emptyDir: {}\n        - name: pod-var-cache-designate\n          emptyDir: {}\n        - name: designate-bin\n          configMap:\n            name: designate-bin\n            defaultMode: 0555\n        - name: designate-etc\n          secret:\n            secretName: designate-etc\n            defaultMode: 0444\n        - name: designate-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{ if $mounts_designate_central.volumes }}{{ toYaml $mounts_designate_central.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/deployment-mdns.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.deployment_mdns }}\n\n{{- $envAll := . }}\n{{- $mounts_designate_mdns := .Values.pod.mounts.designate_mdns.designate_mdns }}\n{{- $mounts_designate_mdns_init := .Values.pod.mounts.designate_mdns.init_container }}\n{{- $etcSources := .Values.pod.etcSources.designate_mdns }}\n\n{{- $serviceAccountName := \"designate-mdns\" }}\n{{ tuple $envAll \"mdns\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: designate-mdns\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"designate\" \"mdns\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.mdns }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"designate\" \"mdns\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"designate\" \"mdns\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"designate_mdns\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ tuple \"designate_mdns\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"designate_mdns\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"designate\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"designate\" \"mdns\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.mdns.node_selector_key }}: {{ .Values.labels.mdns.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.mdns.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"mdns\" $mounts_designate_mdns_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: designate-mdns\n{{ tuple $envAll \"designate_mdns\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.mdns | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"designate\" \"container\" \"designate_mdns\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          ports:\n            - name: d-mdns\n              containerPort: {{ tuple \"mdns\" \"internal\" \"ipc\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            - name: d-mdns-udp\n              containerPort: {{ tuple \"mdns\" \"internal\" \"ipc\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              protocol: UDP\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"mdns\" \"internal\" \"ipc\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          command:\n            - bash\n            - /tmp/designate-mdns.sh\n          volumeMounts:\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.designate.oslo_concurrency.lock_path }}\n            - name: designate-bin\n              mountPath: /tmp/designate-mdns.sh\n              subPath: designate-mdns.sh\n              readOnly: true\n            - name: pod-etc-designate\n              mountPath: /etc/designate\n            - name: pod-var-cache-designate\n              mountPath: /var/cache/designate\n            - name: designate-etc\n              mountPath: /etc/designate/designate.conf\n              subPath: designate.conf\n              readOnly: true\n            - name: designate-etc-snippets\n              mountPath: /etc/designate/designate.conf.d/\n              readOnly: true\n            - name: designate-etc\n              mountPath: /etc/designate/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: designate-etc\n              mountPath: /etc/designate/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            {{- if .Values.conf.designate.DEFAULT.log_config_append }}\n            - name: designate-etc\n              mountPath: {{ .Values.conf.designate.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.designate.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{ if $mounts_designate_mdns.volumeMounts }}{{ toYaml $mounts_designate_mdns.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-designate\n          emptyDir: {}\n        
- name: pod-var-cache-designate\n          emptyDir: {}\n        - name: designate-bin\n          configMap:\n            name: designate-bin\n            defaultMode: 0555\n        - name: designate-etc\n          secret:\n            secretName: designate-etc\n            defaultMode: 0444\n        - name: designate-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{ if $mounts_designate_mdns.volumes }}{{ toYaml $mounts_designate_mdns.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/deployment-producer.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.deployment_producer }}\n\n{{- $envAll := . }}\n{{- $mounts_designate_producer := .Values.pod.mounts.designate_producer.designate_producer }}\n{{- $mounts_designate_producer_init := .Values.pod.mounts.designate_producer.init_container }}\n{{- $etcSources := .Values.pod.etcSources.designate_producer }}\n\n{{- $serviceAccountName := \"designate-producer\" }}\n{{ tuple $envAll \"producer\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: designate-producer\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"designate\" \"producer\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.producer }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"designate\" \"producer\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"designate\" \"producer\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ 
tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"designate_producer\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ tuple \"designate_producer\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"designate_producer\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"designate\" \"producer\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.producer.node_selector_key }}: {{ .Values.labels.producer.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"producer\" $mounts_designate_producer_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: designate-producer\n{{ tuple $envAll \"designate_producer\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.producer | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"designate\" \"container\" \"designate_producer\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - bash\n            - /tmp/designate-producer.sh\n          volumeMounts:\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.designate.oslo_concurrency.lock_path }}\n            - name: designate-bin\n              mountPath: /tmp/designate-producer.sh\n              subPath: designate-producer.sh\n              readOnly: true\n            - name: pod-etc-designate\n              mountPath: /etc/designate\n            - name: 
pod-var-cache-designate\n              mountPath: /var/cache/designate\n            - name: designate-etc\n              mountPath: /etc/designate/designate.conf\n              subPath: designate.conf\n              readOnly: true\n            - name: designate-etc-snippets\n              mountPath: /etc/designate/designate.conf.d/\n              readOnly: true\n            - name: designate-etc\n              mountPath: /etc/designate/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: designate-etc\n              mountPath: /etc/designate/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            {{- if .Values.conf.designate.DEFAULT.log_config_append }}\n            - name: designate-etc\n              mountPath: {{ .Values.conf.designate.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.designate.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{ if $mounts_designate_producer.volumeMounts }}{{ toYaml $mounts_designate_producer.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-designate\n          emptyDir: {}\n        - name: pod-var-cache-designate\n          emptyDir: {}\n        - name: designate-bin\n          configMap:\n            name: designate-bin\n            defaultMode: 0555\n        - name: designate-etc\n          secret:\n            secretName: designate-etc\n            defaultMode: 0444\n        - name: designate-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{ if $mounts_designate_producer.volumes }}{{ toYaml $mounts_designate_producer.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/deployment-sink.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.deployment_sink }}\n\n{{- $envAll := . }}\n{{- $mounts_designate_sink := .Values.pod.mounts.designate_sink.designate_sink }}\n{{- $mounts_designate_sink_init := .Values.pod.mounts.designate_sink.init_container }}\n{{- $etcSources := .Values.pod.etcSources.designate_sink }}\n\n{{- $serviceAccountName := \"designate-sink\" }}\n{{ tuple $envAll \"sink\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: designate-sink\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"designate\" \"sink\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.sink }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"designate\" \"sink\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"designate\" \"sink\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"designate_sink\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ tuple \"designate_sink\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"designate_sink\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"designate\" \"sink\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.sink.node_selector_key }}: {{ .Values.labels.sink.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"sink\" $mounts_designate_sink_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: designate-sink\n{{ tuple $envAll \"designate_sink\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.sink | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"designate\" \"container\" \"designate_sink\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - bash\n            - /tmp/designate-sink.sh\n          volumeMounts:\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.designate.oslo_concurrency.lock_path }}\n            - name: designate-bin\n              mountPath: /tmp/designate-sink.sh\n              subPath: designate-sink.sh\n              readOnly: true\n            - name: pod-etc-designate\n              mountPath: /etc/designate\n            - name: pod-var-cache-designate\n              mountPath: /var/cache/designate\n            - name: designate-etc\n         
     mountPath: /etc/designate/designate.conf\n              subPath: designate.conf\n              readOnly: true\n            - name: designate-etc-snippets\n              mountPath: /etc/designate/designate.conf.d/\n              readOnly: true\n            - name: designate-etc\n              mountPath: /etc/designate/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            {{- if .Values.conf.designate.DEFAULT.log_config_append }}\n            - name: designate-etc\n              mountPath: {{ .Values.conf.designate.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.designate.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{ if $mounts_designate_sink.volumeMounts }}{{ toYaml $mounts_designate_sink.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-designate\n          emptyDir: {}\n        - name: pod-var-cache-designate\n          emptyDir: {}\n        - name: designate-bin\n          configMap:\n            name: designate-bin\n            defaultMode: 0555\n        - name: designate-etc\n          secret:\n            secretName: designate-etc\n            defaultMode: 0444\n        - name: designate-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{ if $mounts_designate_sink.volumes }}{{ toYaml $mounts_designate_sink.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/deployment-worker.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.deployment_worker }}\n\n{{- $envAll := . }}\n{{- $mounts_designate_worker := .Values.pod.mounts.designate_worker.designate_worker }}\n{{- $mounts_designate_worker_init := .Values.pod.mounts.designate_worker.init_container }}\n{{- $etcSources := .Values.pod.etcSources.designate_worker }}\n\n{{- $serviceAccountName := \"designate-worker\" }}\n{{ tuple $envAll \"worker\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: designate-worker\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"designate\" \"worker\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.worker }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"designate\" \"worker\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"designate\" \"worker\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"designate_worker\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ tuple \"designate_worker\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"designate_worker\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"designate\" \"worker\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.worker.node_selector_key }}: {{ .Values.labels.worker.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"worker\" $mounts_designate_worker_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: designate-worker-init\n{{ tuple $envAll \"designate_worker\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.worker | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - bash\n            - -c\n            - 'eval \"echo \\\"$(cat /tmp/designate_pools.template)\\\"\" > /etc/designate/pools.yaml && designate-manage pool update'\n          volumeMounts:\n            - name: designate-etc\n              mountPath: /tmp/designate_pools.template\n              subPath: pools.yaml\n              readOnly: true\n            - name: pod-etc-designate\n              mountPath: /etc/designate\n            - name: designate-etc\n              mountPath: /etc/designate/designate.conf\n              subPath: designate.conf\n              readOnly: true\n            {{- if .Values.conf.designate.DEFAULT.log_config_append }}\n            - name: designate-etc\n              mountPath: {{ 
.Values.conf.designate.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.designate.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{ if $mounts_designate_worker.volumeMounts }}{{ toYaml $mounts_designate_worker.volumeMounts | indent 12 }}{{ end }}\n      containers:\n        - name: designate-worker\n{{ tuple $envAll \"designate_worker\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.worker | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"designate\" \"container\" \"designate_worker\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - bash\n            - /tmp/designate-worker.sh\n          volumeMounts:\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.designate.oslo_concurrency.lock_path }}\n            - name: designate-bin\n              mountPath: /tmp/designate-worker.sh\n              subPath: designate-worker.sh\n              readOnly: true\n            - name: pod-etc-designate\n              mountPath: /etc/designate\n            - name: pod-var-cache-designate\n              mountPath: /var/cache/designate\n            - name: designate-etc\n              mountPath: /etc/designate/designate.conf\n              subPath: designate.conf\n              readOnly: true\n            - name: designate-etc-snippets\n              mountPath: /etc/designate/designate.conf.d/\n              readOnly: true\n            - name: designate-etc\n              mountPath: /etc/designate/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: designate-etc\n              mountPath: /etc/designate/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            {{- if .Values.conf.designate.DEFAULT.log_config_append }}\n 
           - name: designate-etc\n              mountPath: {{ .Values.conf.designate.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.designate.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{ if $mounts_designate_worker.volumeMounts }}{{ toYaml $mounts_designate_worker.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-designate\n          emptyDir: {}\n        - name: pod-var-cache-designate\n          emptyDir: {}\n        - name: designate-bin\n          configMap:\n            name: designate-bin\n            defaultMode: 0555\n        - name: designate-etc\n          secret:\n            secretName: designate-etc\n            defaultMode: 0444\n        - name: designate-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{ if $mounts_designate_worker.volumes }}{{ toYaml $mounts_designate_worker.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "designate/templates/ingress-api.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.ingress_api }}\n\n{{- $ingressOpts := dict \"envAll\" . \"backendServiceType\" \"dns\" \"backendPort\" \"dns-api\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n\n{{- end }}\n"
  },
  {
    "path": "designate/templates/job-bootstrap.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.job_bootstrap }}\n\n{{- $envAll := . }}\n{{- if .Values.bootstrap.enabled }}\n{{- $mounts_designate_bootstrap := .Values.pod.mounts.designate_bootstrap.designate_bootstrap }}\n{{- $mounts_designate_bootstrap_init := .Values.pod.mounts.designate_bootstrap.init_container }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: designate-bootstrap\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"designate\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"bootstrap\" $mounts_designate_bootstrap_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: designate-bootstrap\n          image: {{ .Values.images.tags.bootstrap }}\n          imagePullPolicy: {{ .Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          command:\n            - 
/tmp/bootstrap.sh\n          volumeMounts:\n            - name: designate-bin\n              mountPath: /tmp/bootstrap.sh\n              subPath: bootstrap.sh\n              readOnly: true\n{{ if $mounts_designate_bootstrap.volumeMounts }}{{ toYaml $mounts_designate_bootstrap.volumeMounts | indent 10 }}{{ end }}\n      volumes:\n        - name: designate-bin\n          configMap:\n            name: designate-bin\n            defaultMode: 0555\n{{ if $mounts_designate_bootstrap.volumes }}{{ toYaml $mounts_designate_bootstrap.volumes | indent 6 }}{{ end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/job-db-init.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"designate\" \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n\n{{- end }}\n"
  },
  {
    "path": "designate/templates/job-db-sync.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"designate\" \"podVolMounts\" .Values.pod.mounts.designate_db_sync.designate_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.designate_db_sync.designate_db_sync.volumes \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n\n{{- end }}\n"
  },
  {
    "path": "designate/templates/job-ks-endpoints.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"designate\" \"serviceTypes\" ( tuple \"dns\" ) \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n\n{{- end }}\n"
  },
  {
    "path": "designate/templates/job-ks-service.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"designate\" \"serviceTypes\" ( tuple \"dns\" ) \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n\n {{- end }}\n"
  },
  {
    "path": "designate/templates/job-ks-user.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# $% What does following represent?\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"designate\" \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n\n{{- end }}\n"
  },
  {
    "path": "designate/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"designate\" \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/pdb-api.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.pdb_api }}\n\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: designate-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n      app: designate-api\n{{ tuple $envAll \"designate\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/pdb-central.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.pdb_central }}\n\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: designate-central\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.central.min_available }}\n  selector:\n    matchLabels:\n      app: designate-central\n{{- end }}\n"
  },
  {
    "path": "designate/templates/pdb-mdns.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.pdb_mdns }}\n\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: designate-mdns\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.mdns.min_available }}\n  selector:\n    matchLabels:\n      app: designate-mdns\n{{- end }}\n"
  },
  {
    "path": "designate/templates/pdb-producer.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.pdb_producer }}\n\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: designate-producer\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.producer.min_available }}\n  selector:\n    matchLabels:\n      app: designate-producer\n{{- end }}\n"
  },
  {
    "path": "designate/templates/pdb-sink.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.pdb_sink }}\n\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: designate-sink\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.sink.min_available }}\n  selector:\n    matchLabels:\n      app: designate-sink\n{{- end }}\n"
  },
  {
    "path": "designate/templates/pdb-worker.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.pdb_worker }}\n\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: designate-worker\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.worker.min_available }}\n  selector:\n    matchLabels:\n      app: designate-worker\n{{- end }}\n"
  },
  {
    "path": "designate/templates/secret-db.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.secret_db }}\n\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"designate\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  DB_CONNECTION: {{ tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"dns\" ) }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/secret-keystone.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.secret_keystone }}\n\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"designate\" \"test\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"designate\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass \"http\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/service-api.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.service_api }}\n\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"dns\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: dns-api\n      port: {{ tuple \"dns\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"designate\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "designate/templates/service-ingress-api.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.service_ingress_api }}\n\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"dns\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n\n{{- end }}\n"
  },
  {
    "path": "designate/templates/service-mdns.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n{{- if .Values.manifests.service_mdns }}\n\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"mdns\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: d-mdns\n      port: {{ tuple \"mdns\" \"internal\" \"ipc\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    - name: d-mdns-udp\n      port: {{ tuple \"mdns\" \"internal\" \"ipc\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      protocol: UDP\n  selector:\n{{ tuple $envAll \"designate\" \"mdns\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.mdns.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.mdns.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "designate/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for designate.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  central:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  producer:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  worker:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  mdns:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  service_cleaner:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  sink:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: 
quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    designate_db_sync: quay.io/airshipit/designate:2025.1-ubuntu_noble\n    designate_api: quay.io/airshipit/designate:2025.1-ubuntu_noble\n    designate_central: quay.io/airshipit/designate:2025.1-ubuntu_noble\n    designate_mdns: quay.io/airshipit/designate:2025.1-ubuntu_noble\n    designate_worker: quay.io/airshipit/designate:2025.1-ubuntu_noble\n    designate_producer: quay.io/airshipit/designate:2025.1-ubuntu_noble\n    designate_sink: quay.io/airshipit/designate:2025.1-ubuntu_noble\n    designate_service_cleaner: quay.io/airshipit/designate:2025.1-ubuntu_noble\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\npod:\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n  mounts:\n    designate_api:\n      init_container: null\n      designate_api:\n        volumeMounts:\n        volumes:\n    designate_central:\n      init_container: null\n      designate_central:\n        volumeMounts:\n        volumes:\n    designate_mdns:\n      init_container: null\n      designate_mdns:\n        volumeMounts:\n        volumes:\n    designate_worker:\n      init_container: null\n      designate_worker:\n        volumeMounts:\n        volumes:\n    designate_producer:\n      init_container: null\n      designate_producer:\n        volumeMounts:\n        volumes:\n    designate_service_cleaner:\n      init_container: null\n      designate_service_cleaner:\n        volumeMounts:\n        volumes:\n    designate_sink:\n      init_container: null\n      designate_sink:\n        volumeMounts:\n        volumes:\n    designate_db_sync:\n      designate_db_sync:\n        volumeMounts:\n        volumes:\n  # -- This allows users to add Kubernetes Projected Volumes 
to be mounted at /etc/designate/designate.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/daemonset/cronjob\n  ## https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    designate_api: []\n    designate_central: []\n    designate_mdns: []\n    designate_worker: []\n    designate_producer: []\n    designate_sink: []\n    designate_service_cleaner: []\n    designate_db_sync: []\n  replicas:\n    api: 1\n    central: 1\n    mdns: 1\n    producer: 1\n    sink: 1\n    worker: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n      central:\n        min_available: 0\n      mdns:\n        min_available: 0\n      worker:\n        min_available: 0\n      producer:\n        min_available: 0\n      sink:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n      mdns:\n        timeout: 30\n\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        
requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      service_cleaner:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 9001\n  mdns:\n    name: \"designate-mdns\"\n    proto: \"http\"\n    external_policy_local: false\n    node_port:\n      enabled: true\n      port: 5354\n\nbootstrap:\n  enabled: false\n  script: |\n    openstack token issue\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - designate-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n    job_rabbit_init:\n      api:\n        jobs:\n          - designate-rabbit-init\n      sink:\n        jobs:\n          - designate-rabbit-init\n      central:\n        jobs:\n          - designate-rabbit-init\n      worker:\n        jobs:\n          - designate-rabbit-init\n  static:\n    db_init:\n      services:\n        - service: oslo_db\n          endpoint: internal\n    db_sync:\n      jobs:\n        - 
designate-db-init\n      services:\n        - service: oslo_db\n          endpoint: internal\n    ks_user:\n      services:\n        - service: identity\n          endpoint: internal\n    ks_service:\n      services:\n        - service: identity\n          endpoint: internal\n    ks_endpoints:\n      jobs:\n        - designate-ks-service\n      services:\n        - service: identity\n          endpoint: internal\n    rabbit_init:\n      services:\n        - service: oslo_messaging\n          endpoint: internal\n    api:\n      jobs:\n        - designate-db-sync\n        - designate-ks-user\n        - designate-ks-endpoints\n      service:\n        - service: oslo_db\n          endpoint: internal\n        - service: identity\n          endpoint: internal\n        - service: oslo_messaging\n          endpoint: internal\n    central:\n      jobs:\n        - designate-db-sync\n        - designate-ks-user\n        - designate-ks-endpoints\n      service:\n        - service: oslo_db\n          endpoint: internal\n        - service: identity\n          endpoint: internal\n        - service: oslo_messaging\n          endpoint: internal\n    worker:\n      jobs:\n        - designate-db-sync\n        - designate-ks-user\n        - designate-ks-endpoints\n      services:\n        - service: oslo_db\n          endpoint: internal\n        - service: identity\n          endpoint: internal\n        - service: mdns\n          endpoint: internal\n    mdns:\n      jobs:\n        - designate-db-sync\n        - designate-ks-user\n        - designate-ks-endpoints\n      services:\n        - service: oslo_db\n          endpoint: internal\n        - service: identity\n          endpoint: internal\n    producer:\n      jobs:\n        - designate-db-sync\n        - designate-ks-user\n        - designate-ks-endpoints\n      services:\n        - service: oslo_db\n          endpoint: internal\n        - service: identity\n          endpoint: internal\n    sink:\n      jobs:\n        - 
designate-db-sync\n        - designate-ks-user\n        - designate-ks-endpoints\n      services:\n        - service: oslo_db\n          endpoint: internal\n        - service: identity\n          endpoint: internal\n\nconf:\n  pools: |\n    - name: default\n      # The name is immutable. There will be no option to change the name after\n      # creation and the only way will to change it will be to delete it\n      # (and all zones associated with it) and recreate it.\n      description: Default Pool\n\n      attributes: {}\n\n      # List out the NS records for zones hosted within this pool\n      # This should be a record that is created outside of designate, that\n      # points to the public IP of the controller node.\n      ns_records:\n        - hostname: {{ printf \"ns.%s.svc.%s.\" .Release.Namespace .Values.endpoints.cluster_domain_suffix }}\n          priority: 1\n\n      # List out the nameservers for this pool. These are the actual DNS servers.\n      # We use these to verify changes have propagated to all nameservers.\n      nameservers:\n        - host: ${POWERDNS_SERVICE_HOST}\n          port: {{ tuple \"powerdns\" \"internal\" \"powerdns\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n\n      # List out the targets for this pool. For BIND there will be one\n      # entry for each BIND server, as we have to run rndc command on each server\n      targets:\n        - type: pdns4\n          description: PowerDNS Server\n\n          # List out the designate-mdns servers from which PowerDNS servers should\n          # request zone transfers (AXFRs) from.\n          # This should be the IP of the controller node.\n          # If you have multiple controllers you can add multiple masters\n          # by running designate-mdns on them, and adding them here.\n          masters:\n            - host: ${MINIDNS_SERVICE_HOST}\n              port: {{ tuple \"mdns\" \"internal\" \"ipc\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n\n          # PowerDNS Configuration options\n          options:\n            host: ${POWERDNS_SERVICE_HOST}\n            port: {{ tuple \"powerdns\" \"internal\" \"powerdns\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            api_endpoint: http://${POWERDNS_SERVICE_HOST}:{{ tuple \"powerdns\" \"internal\" \"powerdns_api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            api_token: {{ tuple \"powerdns\" \"service\" . | include \"helm-toolkit.endpoints.endpoint_token_lookup\" }}\n  paste:\n    composite:osapi_dns:\n      use: egg:Paste#urlmap\n      /: osapi_dns_versions\n      /v2: osapi_dns_v2\n      /admin: osapi_dns_admin\n    composite:osapi_dns_versions:\n      use: call:designate.api.middleware:auth_pipeline_factory\n      noauth: http_proxy_to_wsgi cors maintenance faultwrapper osapi_dns_app_versions\n      keystone: http_proxy_to_wsgi cors maintenance faultwrapper osapi_dns_app_versions\n    app:osapi_dns_app_versions:\n      paste.app_factory: designate.api.versions:factory\n    composite:osapi_dns_v2:\n      use: call:designate.api.middleware:auth_pipeline_factory\n      noauth: http_proxy_to_wsgi cors request_id faultwrapper validation_API_v2 noauthcontext maintenance normalizeuri osapi_dns_app_v2\n      keystone: http_proxy_to_wsgi cors request_id faultwrapper validation_API_v2 authtoken keystonecontext maintenance normalizeuri osapi_dns_app_v2\n    app:osapi_dns_app_v2:\n      paste.app_factory: designate.api.v2:factory\n    composite:osapi_dns_admin:\n      use: call:designate.api.middleware:auth_pipeline_factory\n      noauth: http_proxy_to_wsgi cors request_id faultwrapper noauthcontext maintenance normalizeuri osapi_dns_app_admin\n      keystone: http_proxy_to_wsgi cors request_id faultwrapper authtoken keystonecontext maintenance normalizeuri osapi_dns_app_admin\n    app:osapi_dns_app_admin:\n      paste.app_factory: 
designate.api.admin:factory\n    filter:cors:\n      paste.filter_factory: oslo_middleware.cors:filter_factory\n      oslo_config_project: designate\n    filter:request_id:\n      paste.filter_factory: oslo_middleware:RequestId.factory\n    filter:http_proxy_to_wsgi:\n      paste.filter_factory: oslo_middleware:HTTPProxyToWSGI.factory\n    filter:noauthcontext:\n      paste.filter_factory: designate.api.middleware:NoAuthContextMiddleware.factory\n    filter:authtoken:\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n    filter:keystonecontext:\n      paste.filter_factory: designate.api.middleware:KeystoneContextMiddleware.factory\n    filter:maintenance:\n      paste.filter_factory: designate.api.middleware:MaintenanceMiddleware.factory\n    filter:normalizeuri:\n      paste.filter_factory: designate.api.middleware:NormalizeURIMiddleware.factory\n    filter:faultwrapper:\n      paste.filter_factory: designate.api.middleware:FaultWrapperMiddleware.factory\n    filter:validation_API_v2:\n      paste.filter_factory: designate.api.middleware:APIv2ValidationErrorMiddleware.factory\n  policy: {}\n  designate:\n    DEFAULT:\n      debug: false\n      log_config_append: /etc/designate/logging.conf\n    service:api:\n      auth_strategy: keystone\n      enable_api_v2: true\n      enable_api_admin: true\n      enabled_extensions_v2: quotas,reports\n      workers: 2\n    service:worker:\n      enabled: true\n      notify: false\n    oslo_middleware:\n      enable_proxy_headers_parsing: true\n    oslo_policy:\n      policy_file: /etc/designate/policy.yaml\n    oslo_concurrency:\n      lock_path: /var/lock\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. 
when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    storage:sqlalchemy:\n      max_retries: -1\n      # -- Storage backend connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    keystone_authtoken:\n      auth_version: v3\n      auth_type: password\n      memcache_security_strategy: ENCRYPT\n      service_type: dns\n  logging:\n    loggers:\n      keys:\n        - root\n        - designate\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_designate:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: designate\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: 
\"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n  designate_api_uwsgi:\n    uwsgi:\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      procname-prefix-spaced: \"designate-api:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      wsgi-file: /var/lib/openstack/bin/designate-api-wsgi\n      stats: 0.0.0.0:1717\n      stats-http: true\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: designate-keystone-admin\n    designate: designate-keystone-user\n    test: designate-keystone-test\n  oslo_db:\n    admin: designate-db-admin\n    designate: designate-db-user\n  oslo_messaging:\n    admin: designate-rabbitmq-admin\n    designate: designate-rabbitmq-user\n  tls:\n    dns:\n      api:\n        public: designate-tls-public\n  oci_image_registry:\n    designate: designate-oci-image-registry\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      designate:\n        username: designate\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        
user_domain_name: default\n        project_domain_name: default\n      designate:\n        role: admin\n        region_name: RegionOne\n        username: designate\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      test:\n        role: admin\n        region_name: RegionOne\n        username: designate-test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  dns:\n    name: designate\n    hosts:\n      default: designate-api\n      public: designate\n    host_fqdn_override:\n      default: null\n    path:\n      default: /\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 9001\n        public: 80\n  mdns:\n    name: minidns\n    hosts:\n      default: minidns\n      public: designate-mdns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'tcp'\n    port:\n      ipc:\n        default: 5354\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n      designate:\n        username: designate\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /designate\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_cache:\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n    auth:\n      # NOTE: this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take 
advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n      designate:\n        username: designate\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /designate\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  powerdns:\n    auth:\n      service:\n        token: chiave_segreta\n    hosts:\n      default: powerdns\n    host_fqdn_override:\n      default: null\n    port:\n      powerdns_api:\n        default: 8081\n      powerdns:\n        default: 53\n\njobs:\n  service_cleaner:\n    cron: \"*/10 * * * *\"\n    history:\n      success: 3\n      failed: 1\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  cron_job_service_cleaner: true\n  deployment_api: true\n  deployment_central: true\n  deployment_worker: true\n  deployment_producer: true\n  deployment_mdns: true\n  deployment_sink: false\n  ingress_api: true\n  job_bootstrap: true\n  job_db_init: true\n  job_db_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_rabbit_init: true\n  pdb_api: true\n  pdb_producer: true\n  pdb_central: true\n  pdb_worker: true\n  pdb_mdns: true\n  pdb_sink: false\n  secret_db: true\n  secret_ingress_tls: true\n  secret_keystone: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_api: true\n  service_mdns: true\n  service_ingress_api: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     
parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "doc/helm-docs.rst.gotmpl",
    "content": "{{- define \"chart.valueDefaultColumnRender\" }}\n{{- $defaultValue := (default .Default .AutoDefault)  -}}\n{{- $notationType := .NotationType }}\n{{- if (and (hasPrefix \"```\" $defaultValue) (hasSuffix \"```\" $defaultValue) ) -}}\n{{- $defaultValue = (toPrettyJson (fromJson (trimAll \"```\" (default .Default .AutoDefault) ) ) ) -}}\n{{- $notationType = \"json\" }}\n{{- end -}}\n{{- if contains \"\\\\n\" $defaultValue }}\n{{- $notationType = \"default\" }}\n{{- end }}\n{{- if eq $notationType \"\" -}}\n{{ $defaultValue }}\n{{- else -}}\n.. code-block:: {{ $notationType }}\n\n{{ (trimAll \"`\" $defaultValue | trimAll \"\\\"\" | replace \"\\\\n\" \"\\n\") | indent 10 }}\n{{- end }}\n{{- end }}\n\n{{ title .Name }}\n{{ repeat (len .Name) \"=\" }}\n\nThere are various customizations you can do to tailor the deployment of\nOpenStack {{ title .Name }}. You can find those below.\n\n==================\nGeneral Parameters\n==================\n\n   {{- define \"chart.generalParamsvaluesTable\" }}\n     {{- range .Values }}\n   * {{ .Key }}\n\n     * Type: {{ .Type }}\n     * Description: {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }}\n     * {{ template \"chart.valueDefaultColumnRender\" . }}\n\n     {{- end }}\n   {{- end }}\n\n   {{ template \"chart.generalParamsvaluesTable\" . }}\n"
  },
  {
    "path": "doc/requirements.txt",
    "content": "# The order of packages is significant, because pip processes them in the order\n# of appearance. Changing the order has an impact on the overall integration\n# process, which may cause wedges in the gate later.\n\nsphinx>=2.0.0,!=2.1.0 # BSD\nopenstackdocstheme>=2.2.1 # Apache-2.0\nreno>=3.1.0 # Apache-2.0\n"
  },
  {
    "path": "doc/source/_exts/helm_docs.py",
    "content": "# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nfrom pathlib import Path\nimport subprocess\nfrom sphinx.application import Sphinx\nfrom sphinx.util import logging\nfrom sphinx.util.typing import ExtensionMetadata\n\nPREFIX = \"[helm_docs] \"\nVERSION = \"0.1\"\n\n# the main template we use for all charts\nHELMDOCSTMPL = \"helm-docs.rst.gotmpl\"\nLOCALHELMDOCSTMPL = \"README.rst.gotmpl\"\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _run_helm_docs(\n    helmdocsbin: Path,\n    rootdir: Path,\n    outfile: Path,\n    chart: str,\n    helmdocstmpl: Path,\n    charttmpl: Path | None,\n):\n    tmpls = [str(p) for p in [helmdocstmpl, charttmpl] if p is not None]\n    cmd = [\n        str(helmdocsbin),\n        \"--output-file\",\n        str(outfile),\n        \"--template-files\",\n        \",\".join(tmpls),\n        \"--chart-search-root\",\n        chart,\n    ]\n    subprocess.run(cmd, cwd=str(rootdir), check=True)\n\n\ndef setup(app: Sphinx) -> ExtensionMetadata:\n    logger.info(PREFIX + \"plugin %s\", VERSION)\n\n    # calculate our repo root level\n    rootdir = (Path(app.srcdir) / \"..\" / \"..\").resolve()\n    # our helm-docs binary\n    helmdocsbin = rootdir / \"tools\" / \"helm-docs\"\n    # this is where we will be writing our docs which\n    # must be a relative path from a chart directory\n    outdir = Path(\"..\") / \"doc\" / \"source\" / \"chart\"\n    # where our main helm 
template is which must be\n    # relative to a chart directory\n    helmdocstmpl = Path(\"..\") / \"doc\" / HELMDOCSTMPL\n\n    # find each chart\n    for chartyaml in rootdir.rglob(\"Chart.yaml\"):\n        # the directory to the chart\n        chartdir = chartyaml.parent\n        # name of our chart\n        chart = chartyaml.parent.name\n        logger.info(PREFIX + \"found %s\", chart)\n        # does the chart have a local template to include\n        localtmpl = (\n            LOCALHELMDOCSTMPL if (chartdir / \"README.rst.gotmpl\").exists() else None\n        )\n        outfile = outdir / f\"{chart}.rst\"\n        _run_helm_docs(helmdocsbin, rootdir, outfile, chart, helmdocstmpl, localtmpl)\n\n    return {\n        \"version\": VERSION,\n        \"parallel_read_safe\": True,\n        \"parallel_write_safe\": True,\n    }\n"
  },
  {
    "path": "doc/source/_static/.placeholder",
    "content": ""
  },
  {
    "path": "doc/source/chart/index.rst",
    "content": "Chart Options\n=============\n\nHere are the charts with their documented values.yaml's for OpenStack Helm:\n\n.. toctree::\n    :maxdepth: 2\n\n    openstack_charts\n    infra_charts\n"
  },
  {
    "path": "doc/source/chart/infra_charts.rst",
    "content": "Infra charts options\n--------------------\n\n.. toctree::\n    :maxdepth: 2\n\n    ca-clusterissuer\n    ca-issuer\n    ceph-adapter-rook\n    ceph-client\n    ceph-mon\n    ceph-osd\n    ceph-provisioners\n    ceph-rgw\n    cert-rotation\n    elastic-apm-server\n    elastic-filebeat\n    elastic-metricbeat\n    elastic-packetbeat\n    elasticsearch\n    etcd\n    fluentbit\n    fluentd\n    gnocchi\n    grafana\n    helm-toolkit\n    kibana\n    kube-dns\n    kubernetes-keystone-webhook\n    kubernetes-node-problem-detector\n    ldap\n    libvirt\n    local-storage\n    local-volume-provisioner\n    mariadb\n    mariadb-backup\n    mariadb-cluster\n    memcached\n    nagios\n    namespace-config\n    nfs-provisioner\n    openvswitch\n    ovn\n    postgresql\n    powerdns\n    prometheus\n    prometheus-alertmanager\n    prometheus-blackbox-exporter\n    prometheus-kube-state-metrics\n    prometheus-mysql-exporter\n    prometheus-node-exporter\n    prometheus-openstack-exporter\n    prometheus-process-exporter\n    rabbitmq\n    redis\n    registry\n"
  },
  {
    "path": "doc/source/chart/openstack_charts.rst",
    "content": "OpenStack charts options\n------------------------\n\n.. toctree::\n    :maxdepth: 2\n\n    aodh\n    barbican\n    blazar\n    ceilometer\n    cinder\n    cyborg\n    cloudkitty\n    designate\n    freezer\n    glance\n    heat\n    horizon\n    ironic\n    keystone\n    magnum\n    manila\n    masakari\n    mistral\n    neutron\n    nova\n    octavia\n    placement\n    rally\n    skyline\n    swift\n    tacker\n    tempest\n    trove\n    watcher\n    zaqar\n"
  },
  {
    "path": "doc/source/conf.py",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.join(os.path.abspath('.'), '_exts'))\n\nsys.path.insert(0, os.path.abspath('../..'))\n# -- General configuration ----------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n    'openstackdocstheme',\n    'helm_docs',\n]\n\n# openstackdocstheme options\nopenstackdocs_repo_name = 'openstack/openstack-helm'\nopenstackdocs_auto_name = False\nopenstackdocs_use_storyboard = True\nopenstackdocs_pdf_link = True\n\n# autodoc generation is a bit aggressive and a nuisance when doing heavy\n# text edit cycles.\n# execute \"export SPHINX_DEBUG=1\" in your terminal to disable\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'openstack-helm'\ncopyright = '2016-2023, OpenStack Foundation'\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\nadd_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\nshow_authors = True\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'native'\n\n# -- Options for HTML output --------------------------------------------------\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\nhtml_theme = 'openstackdocs'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n# html_last_updated_fmt = '%Y-%m-%d %H:%M'\n\n# The theme to use for HTML and HTML Help pages.  Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\n# html_theme_path = [\".\"]\n# html_theme = '_theme'\n# html_static_path = ['static']\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = '%sdoc' % project\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass\n# [howto/manual]).\nlatex_documents = [\n    ('index',\n     'doc-%s.tex' % project,\n     '%s Documentation' % project,\n     'OpenStack Foundation', 'manual'),\n]\n\n# Example configuration for intersphinx: refer to the Python standard library.\n# intersphinx_mapping = {'http://docs.python.org/': None}\n"
  },
  {
    "path": "doc/source/devref/endpoints.rst",
    "content": "Endpoints\n---------\n\nThe project's goal is to provide a consistent mechanism for endpoints.\nOpenStack is a highly interconnected application, with various\ncomponents requiring connectivity details to numerous services,\nincluding other OpenStack components and infrastructure elements such as\ndatabases, queues, and memcached infrastructure. The project's goal is\nto ensure that it can provide a consistent mechanism for defining these\n\"endpoints\" across all charts and provide the macros necessary to\nconvert those definitions into usable endpoints. The charts should\nconsistently default to building endpoints that assume the operator is\nleveraging all charts to build their OpenStack cloud. Endpoints should\nbe configurable if an operator would like a chart to work with their\nexisting infrastructure or run elements in different namespaces.\n\nFor instance, in the Neutron chart ``values.yaml`` the following\nendpoints are defined:\n\n::\n\n    # typically overridden by environmental\n    # values, but should include all endpoints\n    # required by this chart\n    endpoints:\n      image:\n        hosts:\n          default: glance-api\n        type: image\n        path: null\n        scheme: 'http'\n        port:\n          api: 9292\n      compute:\n        hosts:\n          default: nova-api\n        path: \"/v2/%(tenant_id)s\"\n        type: compute\n        scheme: 'http'\n        port:\n          api: 8774\n          metadata: 8775\n          novncproxy: 6080\n      identity:\n        hosts:\n          default: keystone-api\n        path: /v3\n        type: identity\n        scheme: 'http'\n        port:\n          admin: 35357\n          public: 5000\n      network:\n        hosts:\n          default: neutron-server\n        path: null\n        type: network\n        scheme: 'http'\n        port:\n          api: 9696\n\nThese values define all the endpoints that the Neutron chart may need in\norder to build full URL compatible endpoints 
to various services.\nLong-term, these will also include database, memcached, and rabbitmq\nelements in one place. Essentially, all external connectivity can be\ndefined centrally.\n\nThe macros that help translate these into the actual URLs necessary are\ndefined in the ``helm-toolkit`` chart. For instance, the cinder chart\ndefines a ``glance_api_servers`` definition in the ``cinder.conf``\ntemplate:\n\n::\n\n    {{- if empty .Values.conf.cinder.DEFAULT.glance_api_servers -}}\n    {{- $_ := tuple \"image\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.cinder.DEFAULT \"glance_api_servers\" -}}\n    {{- end -}}\n\n\nAs an example, this line uses the ``endpoints.keystone_endpoint_uri_lookup``\nmacro in the ``helm-toolkit`` chart (since it is used by all charts). Note that\nthere is a second convention here. All ``{{ define }}`` macros in charts\nshould be pre-fixed with the chart that is defining them. This allows\ndevelopers to easily identify the source of a Helm macro and also avoid\nnamespace collisions. In the example above, the macro\n``endpoints.keystone_endpoint_uri_lookup`` is defined in the ``helm-toolkit`` chart.\nThis macro is passing three parameters (aided by the ``tuple`` method\nbuilt into the go/sprig templating library used by Helm):\n\n-  image: This is the OpenStack service that the endpoint is being built\n   for. This will be mapped to ``glance`` which is the image service for\n   OpenStack.\n-  internal: This is the OpenStack endpoint type we are looking for -\n   valid values would be ``internal``, ``admin``, and ``public``\n-  api: This is the port to map to for the service.\n\nCharts should not use hard coded values such as\n``http://keystone-api:5000`` because these are not compatible with\noperator overrides and do not support spreading components out over\nvarious namespaces.\n\nBy default, each endpoint is located in the same namespace as the current\nservice's helm chart. 
To connect to a service which is running in a different\nKubernetes namespace, a ``namespace`` can be provided for each individual\nendpoint.\n"
  },
  {
    "path": "doc/source/devref/fluent-logging.rst",
    "content": "Logging Mechanism\n=================\n\nLogging Requirements\n--------------------\n\nOpenStack-Helm defines a centralized logging mechanism to provide insight into\nthe state of the OpenStack services and infrastructure components as\nwell as the underlying Kubernetes platform. Among the requirements for a logging\nplatform, where log data can come from and where log data needs to be delivered\nvary widely. To support various logging scenarios, OpenStack-Helm should\nprovide a flexible mechanism to meet certain operational needs.\n\n\nEFK (Elasticsearch, Fluent-bit & Fluentd, Kibana) based Logging Mechanism\n-------------------------------------------------------------------------\nOpenStack-Helm provides a fast and lightweight log forwarder and a full-featured log\naggregator complementing each other providing a flexible and reliable solution.\nEspecially, Fluent-bit is used as a log forwarder and Fluentd is used as a main\nlog aggregator and processor.\n\nFluent-bit and Fluentd meet OpenStack-Helm's logging requirements for gathering,\naggregating, and delivering logged events. Fluent-bit runs as a daemonset on\neach node and mounts the ``/var/lib/docker/containers`` directory. The Docker\ncontainer runtime engine directs events posted to stdout and stderr to this\ndirectory on the host. Fluent-bit then forwards the contents of that directory to\nFluentd. Fluentd runs as a deployment on the designated nodes and exposes a service\nfor Fluent-bit to forward logs. Fluentd should then apply the Logstash format to\nthe logs. Fluentd can also write kubernetes and OpenStack metadata to the logs.\nFluentd will then forward the results to Elasticsearch and, optionally, to Kafka.\nElasticsearch indexes the logs in a logstash-* index by default. Kafka stores\nthe logs in a ``logs`` topic by default. Any external tool can then consume the\n``logs`` topic.\n\nThe resulting logs can then be queried directly through Elasticsearch, or they\ncan be viewed via Kibana. 
Kibana offers a dashboard that can create custom views\non logged events, and Kibana integrates well with Elasticsearch by default.\n"
  },
  {
    "path": "doc/source/devref/images.rst",
    "content": ".. _images documentation:\n\nImages\n------\n\nThe project's core philosophy regarding images is that the toolsets\nrequired to enable the OpenStack services should be applied by\nKubernetes itself. This requires OpenStack-Helm to develop common and\nsimple scripts with minimal dependencies that can be overlaid on any\nimage that meets the OpenStack core library requirements. The advantage\nof this is that the project can be image agnostic, allowing operators to\nuse Stackanetes, Kolla, LOCI, or any image flavor and format they\nchoose and they will all function the same.\n\nA long-term goal, besides being image agnostic, is to also be able to\nsupport any of the container runtimes that Kubernetes supports, even\nthose that might not use Docker's own packaging format. This will allow\nthe project to continue to offer maximum flexibility with regard to\noperator choice.\n\nTo that end, all charts provide an ``images:`` section that allows\noperators to override images. Also, all default image references should\nbe fully spelled out, even those hosted by Docker or Quay. Further, no\ndefault image reference should use ``:latest`` but rather should be\npinned to a specific version to ensure consistent behavior for\ndeployments over time.\n\nToday, the ``images:`` section has several common conventions. Most\nOpenStack services require a database initialization function, a\ndatabase synchronization function, and a series of steps for Keystone\nregistration and integration. Each component may also have a specific\nimage that composes an OpenStack service. 
The images may or may not\ndiffer, but regardless, should all be defined in ``images``.\n\nThe following standards are in use today, in addition to any components\ndefined by the service itself:\n\n-  dep\\_check: The image that will perform dependency checking in an\n   init-container.\n-  db\\_init: The image that will perform database creation operations\n   for the OpenStack service.\n-  db\\_sync: The image that will perform database sync (schema\n   initialization and migration) for the OpenStack service.\n-  db\\_drop: The image that will perform database deletion operations\n   for the OpenStack service.\n-  ks\\_user: The image that will perform keystone user creation for the\n   service.\n-  ks\\_service: The image that will perform keystone service\n   registration for the service.\n-  ks\\_endpoints: The image that will perform keystone endpoint\n   registration for the service.\n-  pull\\_policy: The image pull policy, one of \"Always\", \"IfNotPresent\",\n   and \"Never\" which will be used by all containers in the chart.\n\nAn illustrative example of an ``images:`` section taken from the heat\nchart:\n\n::\n\n    images:\n      tags:\n        bootstrap: docker.io/openstackhelm/heat:ocata\n        db_init: docker.io/openstackhelm/heat:ocata\n        db_sync: docker.io/kolla/ubuntu-source-heat-api:ocata\n        db_drop: docker.io/openstackhelm/heat:ocata\n        ks_user: docker.io/openstackhelm/heat:ocata\n        ks_service: docker.io/openstackhelm/heat:ocata\n        ks_endpoints: docker.io/openstackhelm/heat:ocata\n        api: docker.io/kolla/ubuntu-source-heat-api:ocata\n        cfn: docker.io/kolla/ubuntu-source-heat-api:ocata\n        engine: docker.io/openstackhelm/heat:ocata\n        dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal\n      pull_policy: \"IfNotPresent\"\n\nThe OpenStack-Helm project today uses a mix of Docker images from\nStackanetes and Kolla, but will likely standardize on a default set of\nimages for 
all charts without any reliance on image-specific utilities.\n"
  },
  {
    "path": "doc/source/devref/index.rst",
    "content": "Developer References\n====================\n\nContents:\n\n.. toctree::\n   :maxdepth: 2\n\n   endpoints\n   images\n   networking\n   oslo-config\n   pod-disruption-budgets\n   upgrades\n   fluent-logging\n   node-and-label-specific-configurations\n"
  },
  {
    "path": "doc/source/devref/networking.rst",
    "content": "==========\nNetworking\n==========\nCurrently OpenStack-Helm supports OpenVSwitch and LinuxBridge as network\nvirtualization engines. In order to support many possible backends (SDNs),\na modular architecture of the Neutron chart was developed. OpenStack-Helm can support\nevery SDN solution that has a Neutron plugin, either core_plugin or mechanism_driver.\n\nThe Neutron reference architecture provides mechanism_drivers :code:`OpenVSwitch`\n(OVS) and :code:`linuxbridge` (LB) with ML2 :code:`core_plugin` framework.\n\nOther networking services provided by Neutron are:\n\n#. L3 routing - creation of routers\n#. DHCP - auto-assign IP address and DNS info\n#. Metadata - Provide proxy for Nova metadata service\n\nIntroducing a new SDN solution should consider how the above services are\nprovided. It may be required to disable built-in Neutron functionality.\n\nNeutron architecture\n--------------------\n\nNeutron chart includes the following services:\n\nneutron-server\n~~~~~~~~~~~~~~\nneutron-server is serving the networking REST API for operator and other\nOpenStack services usage. The internals of Neutron are highly flexible,\nproviding plugin mechanisms for all networking services exposed. The\nconsistent API is exposed to the user, but the internal implementation\nis up to the chosen SDN.\n\nTypical networking API request is an operation of create/update/delete:\n * network\n * subnet\n * port\n\nNeutron-server service is scheduled on nodes with\n``openstack-control-plane=enabled`` label.\n\nneutron-rpc-server\n~~~~~~~~~~~~~~~~~~\nneutron-rpc-server is serving the networking RPC backend for neutron API\nservices. The internals of Neutron are highly flexible,\nproviding plugin mechanisms for all networking services exposed. 
The\nconsistent API is exposed to the user, but the internal implementation\nis up to the chosen SDN.\n\nTypical networking API request is an operation of create/update/delete:\n * network\n * subnet\n * port\n\nTo use other Neutron reference architecture types of SDN, these options\nshould be configured in :code:`neutron.conf`:\n\n.. code-block:: ini\n\n    [DEFAULT]\n    ...\n    # core_plugin - plugin responsible for L2 connectivity and IP address\n    #               assignments.\n    # ML2 (Modular Layer 2) is the core plugin provided by Neutron ref arch\n    # If other SDN implements its own logic for L2, it should replace the\n    # ml2 here\n    core_plugin = ml2\n\n    # service_plugins - a list of extra services exposed by Neutron API.\n    # Example: router, qos, trunk, metering.\n    # If other SDN implement L3 or other services, it should be configured\n    # here\n    service_plugins = router\n\nAll of the above configs are endpoints or path to the specific class\nimplementing the interface. You can see the endpoints to class mapping in\n`setup.cfg <https://github.com/openstack/neutron/blob/412c49b3930ce8aecb0a07aec50a9607058e5bc7/setup.cfg#L69>`_.\n\nIf the SDN of your choice is using the ML2 core plugin, then the extra\noptions in ``neutron/ml2/plugins/ml2_conf.ini`` should be configured:\n\n.. code-block:: ini\n\n    [ml2]\n    # type_drivers - layer 2 technologies that ML2 plugin supports.\n    # Those are local,flat,vlan,gre,vxlan,geneve\n    type_drivers = flat,vlan,vxlan\n\n    # mech_drivers - implementation of above L2 technologies. This option is\n    # pointing to the engines like linux bridge or OpenVSwitch in ref arch.\n    # This is the place where SDN implementing ML2 driver should be configured\n    mech_drivers = openvswitch, l2population\n\nSDNs implementing ML2 driver can add extra/plugin-specific configuration\noptions in ``neutron/ml2/plugins/ml2_conf.ini``. 
Or define its own ``ml2_conf_<name>.ini``\nfile where configs specific to the SDN would be placed.\n\nThe above configuration options are handled by ``neutron/values.yaml``:\n\n.. code-block:: yaml\n\n    conf:\n      neutron:\n        DEFAULT:\n            ...\n          # core_plugin can be: ml2, calico\n          core_plugin: ml2\n          # service_plugin can be: router, empty for calico,\n          # networking_ovn.l3.l3_ovn.OVNL3RouterPlugin for OVN\n          service_plugins: router\n\n      plugins:\n        ml2_conf:\n          ml2:\n            # mechanism_drivers can be: openvswitch, linuxbridge, ovn\n            mechanism_drivers: openvswitch,l2population\n            type_drivers: flat,vlan,vxlan\n\n\nNeutron-rpc-server service is scheduled on nodes with\n``openstack-control-plane=enabled`` label.\n\nneutron-dhcp-agent\n~~~~~~~~~~~~~~~~~~\nDHCP agent is running dnsmasq process which is serving the IP assignment and\nDNS info. DHCP agent is dependent on the L2 agent wiring the interface.\nSo one should be aware that when changing the L2 agent, it also needs to be\nchanged in the DHCP agent. The configuration of the DHCP agent includes\noption ``interface_driver``, which will instruct how the tap interface created\nfor serving the request should be wired.\n\n.. code-block:: yaml\n\n    conf:\n      dhcp_agent:\n        DEFAULT:\n          # we can define here, which driver we are using:\n          # openvswitch or linuxbridge\n          interface_driver: openvswitch\n\nAnother place where the DHCP agent is dependent on L2 agent is the dependency\nfor the L2 agent daemonset:\n\n.. 
code-block:: yaml\n\n    dependencies:\n      dynamic:\n        targeted:\n          openvswitch:\n            dhcp:\n              pod:\n                # this should be set to corresponding neutron L2 agent\n                - requireSameNode: true\n                  labels:\n                    application: neutron\n                    component: neutron-ovs-agent\n\nThere is also a need for DHCP agent to pass ovs agent config file\n(in :code:`neutron/templates/bin/_neutron-dhcp-agent.sh.tpl`):\n\n.. code-block:: bash\n\n    exec neutron-dhcp-agent \\\n          --config-file /etc/neutron/neutron.conf \\\n          --config-file /etc/neutron/dhcp_agent.ini \\\n          --config-file /etc/neutron/metadata_agent.ini \\\n          --config-file /etc/neutron/plugins/ml2/ml2_conf.ini\n    {{- if ( has \"openvswitch\" .Values.network.backend ) }} \\\n          --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini\n    {{- end }}\n\nThis requirement is OVS specific, the ``ovsdb_connection`` string is defined\nin ``openvswitch_agent.ini`` file, specifying how DHCP agent can connect to ovs.\nWhen using other SDNs, running the DHCP agent may not be required. When the\nSDN solution is addressing the IP assignments in another way, neutron's\nDHCP agent should be disabled.\n\nneutron-dhcp-agent service is scheduled to run on nodes with the label\n``openstack-control-plane=enabled``.\n\nneutron-l3-agent\n~~~~~~~~~~~~~~~~\nL3 agent is serving the routing capabilities for Neutron networks. It is also\ndependent on the L2 agent wiring the tap interface for the routers.\n\nAll dependencies described in neutron-dhcp-agent are valid here.\n\nIf the SDN implements its own version of L3 networking, neutron-l3-agent\nshould not be started.\n\nneutron-l3-agent service is scheduled to run on nodes with the label\n``openstack-control-plane=enabled``.\n\nneutron-metadata-agent\n~~~~~~~~~~~~~~~~~~~~~~\nMetadata-agent is a proxy to nova-metadata service. 
This one provides\ninformation about public IP, hostname, ssh keys, and any tenant specific\ninformation. The same dependencies apply for metadata as it is for DHCP\nand L3 agents. Other SDNs may require to force the config driver in nova,\nsince the metadata service is not exposed by it.\n\nneutron-metadata-agent service is scheduled to run on nodes with the label\n``openstack-control-plane=enabled``.\n\n\nConfiguring network plugin\n--------------------------\nTo be able to configure multiple networking plugins inside of OpenStack-Helm,\na new configuration option is added:\n\n.. code-block:: yaml\n\n    network:\n      # provide what type of network wiring will be used\n      # possible options: openvswitch, linuxbridge, sriov\n      backend:\n        - openvswitch\n\nThis option will allow to configure the Neutron services in proper way, by\nchecking what is the actual backend set in :code:`neutron/values.yaml`.\n\nIn order to meet modularity criteria of Neutron chart, section ``manifests`` in\n:code:`neutron/values.yaml` contains boolean values describing which Neutron's\nKubernetes resources should be deployed:\n\n.. 
code-block:: yaml\n\n    manifests:\n      configmap_bin: true\n      configmap_etc: true\n      daemonset_dhcp_agent: true\n      daemonset_l3_agent: true\n      daemonset_lb_agent: false\n      daemonset_metadata_agent: true\n      daemonset_ovs_agent: true\n      daemonset_sriov_agent: true\n      deployment_server: true\n      deployment_rpc_server: true\n      ingress_server: true\n      job_bootstrap: true\n      job_db_init: true\n      job_db_sync: true\n      job_db_drop: false\n      job_image_repo_sync: true\n      job_ks_endpoints: true\n      job_ks_service: true\n      job_ks_user: true\n      job_rabbit_init: true\n      pdb_server: true\n      pod_rally_test: true\n      secret_db: true\n      secret_keystone: true\n      secret_rabbitmq: true\n      service_ingress_server: true\n      service_server: true\n\nIf :code:`.Values.manifests.daemonset_ovs_agent` will be set to false, neutron\novs agent would not be launched. In that matter, other type of L2 or L3 agent\non compute node can be run.\n\nTo enable new SDN solution, there should be separate chart created, which would\nhandle the deployment of service, setting up the database and any related\nnetworking functionality that SDN is providing.\n\nOpenVSwitch\n~~~~~~~~~~~\nThe ovs set of daemonsets are running on the node labeled\n``openvswitch=enabled``. This includes the compute and controller/network nodes.\nFor more flexibility, OpenVSwitch as a tool was split out of Neutron chart, and\nput in separate chart dedicated OpenVSwitch. Neutron OVS agent remains in\nNeutron chart. Splitting out the OpenVSwitch creates possibilities to use it\nwith different SDNs, adjusting the configuration accordingly.\n\nneutron-ovs-agent\n+++++++++++++++++\nAs part of Neutron chart, this daemonset is running Neutron OVS agent.\nIt is dependent on having :code:`openvswitch-db` and :code:`openvswitch-vswitchd`\ndeployed and ready. 
Since it's the default choice of the networking backend,\nall configuration is in place in ``neutron/values.yaml``. :code:`neutron-ovs-agent`\nshould not be deployed when another SDN is used in ``network.backend``.\n\nScript in :code:`neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl`\nis responsible for determining the tunnel interface and its IP for later usage\nby :code:`neutron-ovs-agent`. The IP is set in init container and shared between\ninit container and main container with :code:`neutron-ovs-agent` via file\n:code:`/tmp/pod-shared/ml2-local-ip.ini`.\n\nConfiguration of OVS bridges can be done via\n``neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl``. The\nscript is configuring the external network bridge and sets up any\nbridge mappings defined in :code:`conf.auto_bridge_add`.  These\nvalues should align with\n:code:`conf.plugins.openvswitch_agent.ovs.bridge_mappings`.\n\nopenvswitch-db and openvswitch-vswitchd\n+++++++++++++++++++++++++++++++++++++++\nThis runs the OVS tool and database. OpenVSwitch chart is not Neutron specific,\nit may be used with other technologies that are leveraging the OVS technology,\nsuch as OVN.\n\nA detail worth mentioning is that ovs is configured to use sockets, rather\nthan the default loopback mechanism.\n\n.. code-block:: bash\n\n    exec /usr/sbin/ovs-vswitchd unix:${OVS_SOCKET} \\\n            -vconsole:emer \\\n            -vconsole:err \\\n            -vconsole:info \\\n            --pidfile=${OVS_PID} \\\n            --mlockall\n\nLinuxbridge\n~~~~~~~~~~~\nLinuxbridge is the second type of Neutron reference architecture L2 agent.\nIt is running on nodes labeled ``linuxbridge=enabled``. As mentioned before,\nall nodes that are requiring the L2 services need to be labeled with linuxbridge.\nThis includes both the compute and controller/network nodes. 
It is not possible\nto label the same node with both openvswitch and linuxbridge (or any other\nnetwork virtualization technology) at the same time.\n\nneutron-lb-agent\n++++++++++++++++\nThis daemonset includes the linuxbridge Neutron agent with bridge-utils and\nebtables utilities installed. This is all that is needed, since linuxbridge\nuses native kernel libraries.\n\n:code:`neutron/templates/bin/_neutron-linuxbridge-agent-init.sh.tpl` is\nconfiguring the tunnel IP, external bridge and all bridge mappings defined\nin config. It is done in init container, and the IP for tunneling is shared\nusing file :code:`/tmp/pod-shared/ml2-local-ip.ini` with main linuxbridge\ncontainer.\n\nIn order to use linuxbridge in your OpenStack-Helm deployment, you need to\nlabel the compute and controller/network nodes with ``linuxbridge=enabled``\nand use this ``neutron/values.yaml`` override:\n\n.. code-block:: yaml\n\n    network:\n      backend: linuxbridge\n    dependencies:\n      dynamic:\n        targeted:\n          linuxbridge:\n            dhcp:\n              pod:\n                - requireSameNode: true\n                  labels:\n                    application: neutron\n                    component: neutron-lb-agent\n            l3:\n              pod:\n                - requireSameNode: true\n                  labels:\n                    application: neutron\n                    component: neutron-lb-agent\n            metadata:\n              pod:\n                - requireSameNode: true\n                  labels:\n                    application: neutron\n                    component: neutron-lb-agent\n            lb_agent:\n              pod: null\n    conf:\n      neutron:\n        DEFAULT:\n          interface_driver: linuxbridge\n      dhcp_agent:\n        DEFAULT:\n          interface_driver: linuxbridge\n      l3_agent:\n        DEFAULT:\n          interface_driver: linuxbridge\n\n\nOther SDNs\n~~~~~~~~~~\nIn order to add support for more SDNs, these steps 
need to be performed:\n\n#. Configure neutron-server with SDN specific core_plugin/mechanism_drivers.\n#. If required, add new networking agent label type.\n#. Specify if new SDN would like to use existing services from Neutron:\n   L3, DHCP, metadata.\n#. Create separate chart with new SDN deployment method.\n\n\nNova config dependency\n~~~~~~~~~~~~~~~~~~~~~~\nWhenever we change the L2 agent, it should be reflected in ``nova/values.yaml``\nin dependency resolution for nova-compute.\n"
  },
  {
    "path": "doc/source/devref/node-and-label-specific-configurations.rst",
    "content": "Node and node label specific daemonset configurations\n=====================================================\n\nA typical Helm daemonset may leverage a secret to store configuration data.\nHowever, there are cases where the same secret document can't be used for\nthe entire daemonset, because there are node-specific differences.\n\nTo address this use-case, the ``helm-toolkit.utils.daemonset_overrides``\ntemplate was added in helm-toolkit. This was created with the intention that it\nshould be straightforward to convert (wrap) a pre-existing daemonset with the\nfunctionality to override secret parameters on a per-node or per-nodelabel\nbasis.\n\nAdapting your daemonset to support node/nodelabel overrides\n-----------------------------------------------------------\n\nConsider the following (simplified) secret and daemonset pairing example:\n\n.. code-block:: yaml\n\n    # Simplified secret definition\n    # ===============================\n    ---\n    apiVersion: v1\n    kind: Secret\n    # Note ref to $secretName for dynamically generated secrets\n    metadata:\n      name: mychart-etc\n    data:\n      myConf: {{ include \"helm-toolkit.utils.template\" | b64enc }}\n\n    # Simplified daemonset definition\n    # ===============================\n    ---\n    apiVersion: apps/v1\n    kind: DaemonSet\n    metadata:\n      name: mychart-name\n    spec:\n      template:\n        spec:\n          containers:\n          - name: my-container\n          volumes:\n          - name: mychart-etc\n            secret:\n              name: mychart-etc\n              defaultMode: 0444\n\nAssume the chart name is ``mychart``.\n\nNow we can wrap the existing YAML to make it support node and nodelabel\noverrides, with minimal changes to the existing YAML (note where $secretName\nhas been substituted):\n\n.. 
code-block:: yaml\n\n    # Simplified secret definition needed for node/nodelabel overrides\n    # -------------------------------------------------------------------\n    # Wrap secret definition\n    {{- define \"mychart.secret.etc\" }}\n    {{- $secretName := index . 0 }}\n    {{- $envAll := index . 1 }}\n    # Set to the same env context as was available to the caller, so we can\n    # access any env data needed to build the template (e.g., envAll.Values...)\n    {{- with $envAll }}\n    ---\n    apiVersion: v1\n    kind: Secret\n    # Note ref to $secretName for dynamically generated secrets\n    metadata:\n      name: {{ $secretName }}\n    data:\n      myConf: {{ include \"helm-toolkit.utils.template\" | b64enc }}\n    {{- end }}\n    {{- end }}\n\n    # Simplified daemonset definition needed for node/nodelabel overrides\n    # -------------------------------------------------------------------\n    # Wrap daemonset definition\n    {{- define \"mychart.daemonset\" }}\n    {{- $daemonset := index . 0 }}\n    {{- $secretName := index . 1 }}\n    {{- $envAll := index . 
2 }}\n    # Set to the same env context as was available to the caller, so we can\n    # access any env data needed to build the template (e.g., envAll.Values...)\n    {{- with $envAll }}\n    ---\n    apiVersion: apps/v1\n    kind: DaemonSet\n    metadata:\n      name: {{ $daemonset }}\n    spec:\n      template:\n        spec:\n          containers:\n          - name: {{ $daemonset }}\n          volumes:\n            # Note refs to $secretName for dynamically generated secrets\n          - name: {{ $secretName }}\n            secret:\n              name: {{ $secretName }}\n              defaultMode: 0444\n    {{- end }}\n    {{- end }}\n    # Desired daemonset name/prefix that helm will register with kubernetes\n    # Note that this needs to be a valid dns-1123 name for a k8s resource\n    {{- $daemonset := \"mydaemonset\" }}\n    # Desired secret name/prefix that helm will register with kubernetes\n    # Note that this needs to be a valid dns-1123 name for a k8s resource\n    {{- $secretName := \"mychart-etc\" }}\n    # Generate the daemonset YAML with a matching/consistent secretName (so\n    # daemonset_overrides knows which volumes to dynamically substitute with the\n    # auto-generated secrets). You may include in this list any other vars\n    # which you need to reference or substitute into the daemonset YAML above.\n    {{- $daemonset_yaml := list $secretName . | include \"mychart.daemonset\" | toString | fromYaml }}\n    # Namespace to the secret definition which will be used/manipulated\n    {{- $secret_include := \"mychart.secret.etc\" }}\n    # Pass all these elements to daemonset_overrides to generate secret/daemonset\n    # pairings for each set of overrides (plus one with no overrides)\n    {{- list $daemonset $daemonset_yaml $secret_include $secretName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n\nYour daemonset should now support node and nodelabel level overrides. 
(Note that\nyou will also need your chart to have helm-toolkit listed as a dependency.)\n\nImplementation details of node/nodelabel overrides\n--------------------------------------------------\n\nInstead of having one daemonset with one monolithic secret, this helm-toolkit\nfeature permits a common daemonset and secret template, from which daemonset\nand secret pairings are auto-generated. It supports establishing value\noverrides for nodes with specific label value pairs and for targeting nodes with\nspecific hostnames and hostlabels. The overridden configuration is merged with\nthe normal config data, with the override data taking precedence.\n\nThe chart will then generate one daemonset for each host and label override, in\naddition to a default daemonset for which no overrides are applied. Each\ndaemonset generated will also exclude from its scheduling criteria all other\nhosts and labels defined in other overrides for the same daemonset, to ensure\nthat there is no overlap of daemonsets (i.e., one and only one daemonset of a\ngiven type for each node).\n\nFor example, if you have some special conf setting that should be applied\nto ``host1.fqdn``, and another special conf setting that should be applied\nto nodes labeled with ``someNodeLabel``, then three secret/daemonset pairs\nwill be generated and registered with kubernetes: one for ``host1.fqdn``, one\nfor ``someNodeLabel``, and one for ``default``.\n\nThe order of precedence for matches is FQDN, node label, and then default. 
If a\nnode matches both a FQDN and a nodelabel, then only the FQDN override is applied.\nPay special attention to adding FQDN overrides for nodes that match a nodelabel\noverride, as you would need to duplicate the nodelabel overrides for that node\nin the FQDN overrides for them to still apply.\n\nIf there is no matching FQDN and no matching nodelabel, then the default\ndaemonset/secret (with no overrides applied) is used.\n\nIf a node matches more than one nodelabel, only the last matching nodelabel will\napply (last in terms of the order the overrides are defined in the YAML).\n\nExercising node/nodelabel overrides\n-----------------------------------\n\nThe following example demonstrates how to exercise the node/nodelabel overrides:\n\n.. code-block:: yaml\n\n    data:\n      values:\n        conf:\n          mychart:\n            foo: 1\n          # \"overrides\" keyword to invoke override behavior\n          overrides:\n            # To match these overrides to the right daemonset, the following key\n            # needs to follow the pattern:\n            # Chart.Name + '_' + $daemonset\n            # where $daemonset is the value set for $daemonset in the daemonset\n            # config above.\n            mychart_mydaemonset:\n              # labels dict contains a list of labels which overrides apply to. Dict may be excluded\n              # if there are no labels to override.\n              # Note - if a host satisfies more than one label in this list, then whichever matching\n              # label is furthest down on the list will be the one applied to the node. 
E.g., if\n              # a host matched both label criteria below, then the overrides for \"another_label\"\n              # would be applied.\n              labels:\n                # node label key and values to match against to apply these config overrides.\n                # The values are ORed, so the daemonset will spawn to all nodes to node_type\n                # set to \"foo\" and to all nodes with node_type set to \"bar\".\n              - label:\n                  key: node_type\n                  values:\n                  - \"foo\"\n                  - \"bar\"\n                # The setting overrides that will be applied for hosts with this host label\n                conf:\n                  mychart:\n                    foo: 2\n                # another label/key to match against to apply different overrides\n              - label:\n                  key: another_label\n                  values:\n                  - \"another_value\"\n                # The setting overrides that will be applied for hosts with this host label\n                conf:\n                  mychart:\n                    foo: 3\n              # hosts dict contains a list of hosts which overrides apply to. 
Dict may be excluded\n              # if there are no hosts to override.\n              hosts:\n                # FQDN of the host to override settings on\n              - name: superhost\n                # The setting overrides that will be applied for this host\n                conf:\n                  mychart:\n                    foo: 4\n                # FQDN of another host to override settings on\n              - name: superhost2\n                # The setting overrides that will be applied for this host\n                conf:\n                  mychart:\n                    foo: 5\n\nNova vcpu example\n------------------\n\nSome nodes may have a different vcpu_pin_set in nova.conf due to differences\nin CPU hardware.\n\nTo address this, we can specify overrides in the values fed to the chart. Ex:\n\n.. code-block:: yaml\n\n    conf:\n      nova:\n        DEFAULT:\n          vcpu_pin_set: \"0-31\"\n          cpu_allocation_ratio: 3.0\n      overrides:\n        nova_compute:\n          labels:\n          - label:\n              key: compute-type\n              values:\n              - \"dpdk\"\n              - \"sriov\"\n            conf:\n              nova:\n                DEFAULT:\n                  vcpu_pin_set: \"0-15\"\n          - label:\n              key: another-label\n              values:\n              - \"another-value\"\n            conf:\n              nova:\n                DEFAULT:\n                  vcpu_pin_set: \"16-31\"\n          hosts:\n          - name: host1.fqdn\n            conf:\n              nova:\n                DEFAULT:\n                  vcpu_pin_set: \"8-15\"\n          - name: host2.fqdn\n            conf:\n              nova:\n                DEFAULT:\n                  vcpu_pin_set: \"16-23\"\n\nNote that only one set of overrides is applied per node, such that:\n\n1. Host overrides supersede label overrides\n2. 
The farther down the list the label appears, the greater precedence it has.\n   e.g., \"another-label\" overrides will apply to a node containing both labels.\n\nAlso note that other non-overridden values are inherited by hosts and labels with overrides.\nThe following shows a set of example hosts and the values fed into each:\n\n1. ``host1.fqdn`` with labels ``compute-type: dpdk, sriov`` and ``another-label: another-value``:\n\n   .. code-block:: yaml\n\n    conf:\n      nova:\n        DEFAULT:\n          vcpu_pin_set: \"8-15\"\n          cpu_allocation_ratio: 3.0\n\n2. ``host2.fqdn`` with labels ``compute-type: dpdk, sriov`` and ``another-label: another-value``:\n\n   .. code-block:: yaml\n\n    conf:\n      nova:\n        DEFAULT:\n          vcpu_pin_set: \"16-23\"\n          cpu_allocation_ratio: 3.0\n\n3. ``host3.fqdn`` with labels ``compute-type: dpdk, sriov`` and ``another-label: another-value``:\n\n   .. code-block:: yaml\n\n    conf:\n      nova:\n        DEFAULT:\n          vcpu_pin_set: \"16-31\"\n          cpu_allocation_ratio: 3.0\n\n4. ``host4.fqdn`` with labels ``compute-type: dpdk, sriov``:\n\n   .. code-block:: yaml\n\n    conf:\n      nova:\n        DEFAULT:\n          vcpu_pin_set: \"0-15\"\n          cpu_allocation_ratio: 3.0\n\n5. ``host5.fqdn`` with no labels:\n\n   .. code-block:: yaml\n\n    conf:\n      nova:\n        DEFAULT:\n          vcpu_pin_set: \"0-31\"\n          cpu_allocation_ratio: 3.0\n"
  },
  {
    "path": "doc/source/devref/oslo-config.rst",
    "content": "OSLO-Config Values\n------------------\n\nOpenStack-Helm generates oslo-config compatible formatted configuration files for\nservices dynamically from values specified in a yaml tree. This allows operators to\ncontrol any and all aspects of an OpenStack services configuration. An example\nsnippet for an imaginary Keystone configuration is described here:\n\n::\n\n    conf:\n      keystone:\n        DEFAULT: # Keys at this level are used for section headings\n          max_token_size: 255\n        token:\n          provider: fernet\n        fernet_tokens:\n          key_repository: /etc/keystone/fernet-keys/\n        credential:\n          key_repository: /etc/keystone/credential-keys/\n        database:\n          max_retries: -1\n        cache:\n          enabled: true\n          backend: dogpile.cache.memcached\n        oslo_messaging_notifications:\n          driver: # An example of a multistring option's syntax\n            type: multistring\n            values:\n              - messagingv2\n              - log\n        security_compliance:\n          password_expires_ignore_user_ids:\n          # Values in a list will be converted to a comma separated key\n            - \"123\"\n            - \"456\"\n\nThis will be consumed by the templated ``configmap-etc.yaml`` manifest to\nproduce the following config file:\n\n::\n\n    ---\n    # Source: keystone/templates/configmap-etc.yaml\n    apiVersion: v1\n    kind: ConfigMap\n    metadata:\n      name: keystone-etc\n    data:\n      keystone.conf: |\n        [DEFAULT]\n        max_token_size = 255\n        transport_url = rabbit://keystone:password@rabbitmq.default.svc.cluster.local:5672/openstack\n        [cache]\n        backend = dogpile.cache.memcached\n        enabled = true\n        memcache_servers = memcached.default.svc.cluster.local:11211\n        [credential]\n        key_repository = /etc/keystone/credential-keys/\n        [database]\n        connection = 
mysql+pymysql://keystone:password@mariadb.default.svc.cluster.local:3306/keystone\n        max_retries = -1\n        [fernet_tokens]\n        key_repository = /etc/keystone/fernet-keys/\n        [oslo_messaging_notifications]\n        driver = messagingv2\n        driver = log\n        [security_compliance]\n        password_expires_ignore_user_ids = 123,456\n        [token]\n        provider = fernet\n\nNote that some additional values have been injected into the config file, this is\nperformed via statements in the configmap template, which also calls the\n``helm-toolkit.utils.to_oslo_conf`` to convert the yaml to the required layout:\n\n::\n\n    {{- if empty .Values.conf.keystone.database.connection -}}\n    {{- $_ := tuple \"oslo_db\" \"internal\" \"user\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\"| set .Values.conf.keystone.database \"connection\" -}}\n    {{- end -}}\n\n    {{- if empty .Values.conf.keystone.DEFAULT.transport_url -}}\n    {{- $_ := tuple \"oslo_messaging\" \"internal\" \"user\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | set .Values.conf.keystone.DEFAULT \"transport_url\" -}}\n    {{- end -}}\n\n    {{- if empty .Values.conf.keystone.cache.memcache_servers -}}\n    {{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.keystone.cache \"memcache_servers\" -}}\n    {{- end -}}\n\n    ---\n    apiVersion: v1\n    kind: ConfigMap\n    metadata:\n      name: keystone-etc\n    data:\n      keystone.conf: |\n    {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.keystone | indent 4 }}\n    {{- end }}\n"
  },
  {
    "path": "doc/source/devref/pod-disruption-budgets.rst",
    "content": "Pod Disruption Budgets\n----------------------\n\nOpenStack-Helm leverages PodDisruptionBudgets to enforce quotas\nthat ensure that a certain number of replicas of a pod are available\nat any given time.  This is particularly important in the case when a Kubernetes\nnode needs to be drained.\n\n\nThese quotas are configurable by modifying the ``minAvailable`` field\nwithin each PodDisruptionBudget manifest, which is conveniently mapped\nto a templated variable inside the ``values.yaml`` file.\nThe ``min_available`` within each service's ``values.yaml`` file can be\nrepresented by either a whole number, such as ``1``, or a percentage,\nsuch as ``80%``.  For example, when deploying 5 replicas of a pod (such as\nkeystone-api), using ``min_available: 3`` would enforce policy to ensure at\nleast 3 replicas were running, whereas using ``min_available: 80%`` would ensure\nthat 4 replicas of that pod are running.\n\n**Note:** The values defined in a PodDisruptionBudget may\nconflict with other values that have been provided if an operator chooses to\nleverage Rolling Updates for deployments.  In the case where an\noperator defines a ``maxUnavailable`` and ``maxSurge`` within an update strategy\nthat is higher than a ``minAvailable`` within a pod disruption budget,\na scenario may occur where pods fail to be evicted from a deployment.\n"
  },
  {
    "path": "doc/source/devref/upgrades.rst",
    "content": "Upgrades and Reconfiguration\n----------------------------\n\nThe OpenStack-Helm project assumes all upgrades will be done through\nHelm. This includes handling several different resource types. First,\nchanges to the Helm chart templates themselves are handled. Second, all\nof the resources layered on top of the container image, such as\n``ConfigMaps`` which includes both scripts and configuration files, are\nupdated during an upgrade. Finally, any image references will result in\nrolling updates of containers, replacing them with the updating image.\n\nAs Helm stands today, several issues exist when you update images within\ncharts that might have been used by jobs that already ran to completion\nor are still in flight. An example of where this behavior would be\ndesirable is when an updated db\\_sync image has updated to point from\none openstack release to another. In this case, the operator will likely\nwant a db\\_sync job, which was already run and completed during site\ninstallation, to run again with the updated image to bring the schema\ninline with the Newton release.\n\nThe OpenStack-Helm project also implements annotations across all chart\nconfigmaps so that changing resources inside containers, such as\nconfiguration files, triggers a Kubernetes rolling update. This means\nthat those resources can be updated without deleting and redeploying the\nservice and can be treated like any other upgrade, such as a container\nimage change.\n\nNote: Rolling update values can conflict with values defined in each\nservice's PodDisruptionBudget.  See\n`here <https://docs.openstack.org/openstack-helm/latest/devref/pod-disruption-budgets.html>`_\nfor more information.\n\nThis is accomplished with the following annotation:\n\n::\n\n          ...\n          annotations:\n            configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n            configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n\nThe ``hash`` function defined in the ``helm-toolkit`` chart ensures that\nany change to any file referenced by configmap-bin.yaml or\nconfigmap-etc.yaml results in a new hash, which will then trigger a\nrolling update.\n\nAll ``Deployment`` chart components are outfitted by default\nwith rolling update strategies:\n\n::\n\n    # Source: keystone/templates/deployment-api.yaml\n    spec:\n      replicas: {{ .Values.pod.replicas.api }}\n    {{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n\nIn ``values.yaml`` in each chart, the same defaults are supplied in every\nchart, which allows the operator to override at upgrade or deployment\ntime.\n\n::\n\n    pod:\n      lifecycle:\n        upgrades:\n          deployments:\n            revision_history: 3\n            pod_replacement_strategy: RollingUpdate\n            rolling_update:\n              max_unavailable: 1\n              max_surge: 3\n"
  },
  {
    "path": "doc/source/index.rst",
    "content": "Welcome to OpenStack-Helm's documentation!\n==========================================\n\nContents:\n\n.. toctree::\n    :maxdepth: 2\n\n    readme\n    install/index\n    chart/index\n    devref/index\n    testing/index\n    monitoring/index\n    logging/index\n    upgrade/index\n    troubleshooting/index\n    specs/index\n\nIndices and Tables\n==================\n\n* :ref:`genindex`\n* :ref:`search`\n"
  },
  {
    "path": "doc/source/install/before_starting.rst",
    "content": "Before starting\n===============\n\nThe OpenStack-Helm charts are published in the `openstack-helm`_ helm repository.\nLet's enable it:\n\n.. code-block:: bash\n\n    helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm\n\nThe OpenStack-Helm `plugin`_ provides some helper commands used later on.\nSo, let's install it:\n\n.. code-block:: bash\n\n    helm plugin install https://opendev.org/openstack/openstack-helm-plugin\n\n.. _openstack-helm: https://tarballs.opendev.org/openstack/openstack-helm\n.. _plugin: https://opendev.org/openstack/openstack-helm-plugin.git\n"
  },
  {
    "path": "doc/source/install/index.rst",
    "content": "Installation\n============\n\nHere are sections that describe how to install OpenStack using OpenStack-Helm:\n\n.. toctree::\n    :maxdepth: 2\n\n    before_starting\n    kubernetes\n    prerequisites\n    openstack\n"
  },
  {
    "path": "doc/source/install/kubernetes.rst",
    "content": "Kubernetes\n==========\n\nOpenStack-Helm provides charts that can be deployed on any Kubernetes cluster if it meets\nthe version :doc:`requirements </readme>`. However, deploying the Kubernetes cluster itself is beyond\nthe scope of OpenStack-Helm.\n\nYou can use any Kubernetes deployment tool for this purpose. In this guide, we detail how to set up\na Kubernetes cluster using Kubeadm and Ansible. While not production-ready, this cluster is ideal\nas a starting point for lab or proof-of-concept environments.\n\nAll OpenStack projects test their code through an infrastructure managed by the CI\ntool, Zuul, which executes Ansible playbooks on one or more test nodes. Therefore, we employ Ansible\nroles/playbooks to install required packages, deploy Kubernetes, and then execute tests on it.\n\nTo establish a test environment, the Ansible role `deploy-env`_ is employed. This role deploys\na basic single/multi-node Kubernetes cluster, used to prove the functionality of commonly used\ndeployment configurations. The role is compatible with Ubuntu Focal and Ubuntu Jammy distributions.\n\n.. note::\n   The role `deploy-env`_ is not idempotent and assumed to be applied to a clean environment.\n\nClone roles git repositories\n----------------------------\n\nBefore proceeding with the steps outlined in the following sections, it is\nimperative that you clone the git repositories containing the required Ansible roles.\n\n.. code-block:: bash\n\n    mkdir ~/osh\n    cd ~/osh\n    git clone https://opendev.org/openstack/openstack-helm.git\n    git clone https://opendev.org/zuul/zuul-jobs.git\n\nInstall Ansible\n---------------\n\n.. code-block:: bash\n\n    pip install ansible\n\nSet roles lookup path\n---------------------\n\nNow let's set the environment variable ``ANSIBLE_ROLES_PATH`` which specifies\nwhere Ansible will lookup roles\n\n.. 
code-block:: bash\n\n    export ANSIBLE_ROLES_PATH=~/osh/openstack-helm/roles:~/osh/zuul-jobs/roles\n\nTo avoid setting it every time when you start a new terminal instance you can define this\nin the Ansible configuration file. Please see the Ansible documentation.\n\nPrepare inventory\n-----------------\n\nThe example below assumes that there are four nodes which must be available via\nSSH using the public key authentication and an ssh user (let's say ``ubuntu``)\nmust have passwordless sudo on the nodes.\n\n.. code-block:: bash\n\n    cat > ~/osh/inventory.yaml <<EOF\n    ---\n    all:\n      vars:\n        ansible_port: 22\n        ansible_user: ubuntu\n        ansible_ssh_private_key_file: /home/ubuntu/.ssh/id_rsa\n        ansible_ssh_extra_args: -o StrictHostKeyChecking=no\n        # The user and group that will be used to run Kubectl and Helm commands.\n        kubectl:\n          user: ubuntu\n          group: ubuntu\n        # The user and group that will be used to run Docker commands.\n        docker_users:\n          - ubuntu\n        # By default the deploy-env role sets up ssh key to make it possible\n        # to connect to the k8s master node via ssh without a password.\n        client_ssh_user: ubuntu\n        cluster_ssh_user: ubuntu\n        # The MetalLB controller will be installed on the Kubernetes cluster.\n        metallb_setup: true\n        # Loopback devices will be created on all cluster nodes which then can be used\n        # to deploy a Ceph cluster which requires block devices to be provided.\n        # Please use loopback devices only for testing purposes. They are not suitable\n        # for production due to performance reasons.\n        loopback_setup: true\n        loopback_device: /dev/loop100\n        loopback_image: /var/lib/openstack-helm/ceph-loop.img\n        loopback_image_size: 12G\n      children:\n        # The primary node where Kubectl and Helm will be installed. 
If it is\n        # the only node then it must be a member of the groups k8s_cluster and\n        # k8s_control_plane. If there are more nodes then the wireguard tunnel\n        # will be established between the primary node and the k8s_control_plane node.\n        primary:\n          hosts:\n            primary:\n              ansible_host: 10.10.10.10\n        # The nodes where the Kubernetes components will be installed.\n        k8s_cluster:\n          hosts:\n            node-1:\n              ansible_host: 10.10.10.11\n            node-2:\n              ansible_host: 10.10.10.12\n            node-3:\n              ansible_host: 10.10.10.13\n        # The control plane node where the Kubernetes control plane components will be installed.\n        # It must be the only node in the group k8s_control_plane.\n        k8s_control_plane:\n          hosts:\n            node-1:\n              ansible_host: 10.10.10.11\n        # These are Kubernetes worker nodes. There could be zero such nodes.\n        # In this case the Openstack workloads will be deployed on the control plane node.\n        k8s_nodes:\n          hosts:\n            node-2:\n              ansible_host: 10.10.10.12\n            node-3:\n              ansible_host: 10.10.10.13\n    EOF\n\n.. note::\n   If you would like to set up a Kubernetes cluster on the local host,\n   configure the Ansible inventory to designate the ``primary`` node as the local host.\n   For further guidance, please refer to the Ansible documentation.\n\n.. note::\n   The full list of variables that you can define in the inventory file can be found in the\n   file `deploy-env/defaults/main.yaml`_.\n\nPrepare playbook\n----------------\n\nCreate an Ansible playbook that will deploy the environment\n\n.. 
code-block:: bash\n\n    cat > ~/osh/deploy-env.yaml <<EOF\n    ---\n    - hosts: all\n      become: true\n      gather_facts: true\n      roles:\n        - ensure-python\n        - ensure-pip\n        - clear-firewall\n        - deploy-env\n    EOF\n\nRun the playbook\n-----------------\n\n.. code-block:: bash\n\n    cd ~/osh\n    ansible-playbook -i inventory.yaml deploy-env.yaml\n\nThe playbook only changes the state of the nodes listed in the inventory file.\n\nIt installs necessary packages, deploys and configures Containerd and Kubernetes. For\ndetails please refer to the role `deploy-env`_ and other roles (`ensure-python`_,\n`ensure-pip`_, `clear-firewall`_) used in the playbook.\n\n.. note::\n   The role `deploy-env`_ configures cluster nodes to use Google DNS servers (8.8.8.8).\n\n   By default, it also configures internal Kubernetes DNS server (Coredns) to work\n   as a recursive DNS server and adds its IP address (10.96.0.10 by default) to the\n   ``/etc/resolv.conf`` file.\n\n   Processes running on the cluster nodes will be able to resolve internal\n   Kubernetes domain names ``*.svc.cluster.local``.\n\n.. _deploy-env: https://opendev.org/openstack/openstack-helm/src/branch/master/roles/deploy-env\n.. _deploy-env/defaults/main.yaml: https://opendev.org/openstack/openstack-helm/src/branch/master/roles/deploy-env/defaults/main.yaml\n.. _zuul-jobs: https://opendev.org/zuul/zuul-jobs.git\n.. _ensure-python: https://opendev.org/zuul/zuul-jobs/src/branch/master/roles/ensure-python\n.. _ensure-pip: https://opendev.org/zuul/zuul-jobs/src/branch/master/roles/ensure-pip\n.. _clear-firewall: https://opendev.org/zuul/zuul-jobs/src/branch/master/roles/clear-firewall\n.. _openstack-helm: https://opendev.org/openstack/openstack-helm.git\n"
  },
  {
    "path": "doc/source/install/openstack.rst",
    "content": "Deploy OpenStack\n================\n\nCheck list before deployment\n----------------------------\n\nAt this point we assume all the prerequisites listed below are met:\n\n- Kubernetes cluster is up and running.\n- `kubectl`_ and `helm`_ command line tools are installed and\n  configured to access the cluster.\n- The OpenStack-Helm repositories are enabled, OpenStack-Helm\n  plugin is installed and necessary environment variables are set.\n- The ``openstack`` namespace is created.\n- MetalLB is deployed. A Gateway API controller is installed and a\n  ``Gateway`` resource is created. The controller will provision a\n  ``LoadBalancer`` service automatically. DNS is configured to resolve\n  the OpenStack endpoint names to the external IP of that service.\n  We recommend `Envoy Gateway`_ as the Gateway API implementation.\n- Ceph is deployed and enabled for using by OpenStack-Helm.\n\n.. _Envoy Gateway: https://gateway.envoyproxy.io/\n\n.. note::\n\n    The recommended way to expose OpenStack services externally is through\n    the `Kubernetes Gateway API`_. The gateway controller (e.g. Envoy Gateway)\n    is deployed in its own namespace (``envoy-gateway-system`` for Envoy Gateway)\n    and creates a ``LoadBalancer`` service backed by MetalLB. Traffic is routed\n    to backend services via ``HTTPRoute`` resources.\n\n    How exactly users expose their workloads may vary. ``HTTPRoute`` objects\n    and additional ``Service`` resources can be added to any chart via the\n    ``.Values.extraObjects`` field available in all OpenStack-Helm charts.\n    For an example see ``values_overrides/nova/gateway.yaml``.\n\n    Legacy ``Ingress`` resources are still supported.\n\n.. _Kubernetes Gateway API: https://gateway-api.sigs.k8s.io/\n\n.. _kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/\n.. 
_helm: https://helm.sh/docs/intro/install/\n\n\nEnvironment variables\n---------------------\n\nFirst let's set environment variables that are later used in the subsequent sections:\n\n.. code-block:: bash\n\n    export OPENSTACK_RELEASE=2025.1\n    # Features enabled for the deployment. This is used to look up values overrides.\n    export FEATURES=\"${OPENSTACK_RELEASE} ubuntu_noble\"\n    # Directory where values overrides are looked up or downloaded to.\n    export OVERRIDES_DIR=$(pwd)/overrides\n\nGet values overrides\n--------------------\n\nOpenStack-Helm provides values overrides for predefined feature sets and various\nOpenStack/platform versions. The overrides are stored in the OpenStack-Helm\ngit repository and OpenStack-Helm plugin provides a command to look them up\nlocally and download (optional) if not found.\n\nPlease read the help:\n\n.. code-block:: bash\n\n    helm osh get-values-overrides --help\n\nFor example, if you pass the feature set ``2025.1 ubuntu_noble`` it will try to\nlook up the following files:\n\n.. code-block:: bash\n\n    2025.1.yaml\n    ubuntu_noble.yaml\n    2025.1-ubuntu_noble.yaml\n\nLet's download the values overrides for the feature set defined above:\n\n.. code-block:: bash\n\n    OVERRIDES_URL=https://opendev.org/openstack/openstack-helm/raw/branch/master/values_overrides\n    for chart in rabbitmq mariadb memcached openvswitch libvirt keystone heat glance cinder trove placement nova neutron horizon; do\n        helm osh get-values-overrides -d -u ${OVERRIDES_URL} -p ${OVERRIDES_DIR} -c ${chart} ${FEATURES}\n    done\n\nNow you can inspect the downloaded files in the ``${OVERRIDES_DIR}`` directory and\nadjust them if needed.\n\nOpenStack backend\n-----------------\n\nOpenStack is a cloud computing platform that consists of a variety of\nservices, and many of these services rely on backend services like RabbitMQ,\nMariaDB, and Memcached for their proper functioning. 
These backend services\nplay a crucial role in OpenStack architecture.\n\nRabbitMQ\n~~~~~~~~\nRabbitMQ is a message broker that is often used in OpenStack to handle\nmessaging between different components and services. It helps in managing\ncommunication and coordination between various parts of the OpenStack\ninfrastructure. Services like Nova (compute), Neutron (networking), and\nCinder (block storage) use RabbitMQ to exchange messages and ensure\nproper orchestration.\n\nUse the following script to deploy RabbitMQ service:\n\n.. code-block:: bash\n\n    helm upgrade --install rabbitmq openstack-helm/rabbitmq \\\n        --namespace=openstack \\\n        --set pod.replicas.server=1 \\\n        --timeout=600s \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c rabbitmq ${FEATURES})\n\n    helm osh wait-for-pods openstack\n\nMariaDB\n~~~~~~~\nDatabase services like MariaDB are used as a backend database for the majority of\nOpenStack projects. These databases store critical information such as user\ncredentials, service configurations, and data related to instances, networks,\nand volumes. Services like Keystone (identity), Nova, Glance (image), and\nCinder rely on MariaDB for data storage.\n\n.. code-block:: bash\n\n    helm upgrade --install mariadb openstack-helm/mariadb \\\n        --namespace=openstack \\\n        --set pod.replicas.server=1 \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c mariadb ${FEATURES})\n\n    helm osh wait-for-pods openstack\n\nMemcached\n~~~~~~~~~\nMemcached is a distributed memory object caching system that is often used\nin OpenStack to improve performance. OpenStack services cache frequently\naccessed data in Memcached, which helps in faster\ndata retrieval and reduces the load on the database backend.\n\n.. 
code-block:: bash\n\n    helm upgrade --install memcached openstack-helm/memcached \\\n        --namespace=openstack \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c memcached ${FEATURES})\n\n    helm osh wait-for-pods openstack\n\nOpenStack\n---------\n\nNow we are ready for the deployment of OpenStack components.\nSome of them are mandatory while others are optional.\n\nKeystone\n~~~~~~~~\n\nOpenStack Keystone is the identity and authentication service\nfor the OpenStack cloud computing platform. It serves as the\ncentral point of authentication and authorization, managing user\nidentities, roles, and access to OpenStack resources. Keystone\nensures secure and controlled access to various OpenStack services,\nmaking it an integral component for user management and security\nin OpenStack deployments.\n\nThis is a ``mandatory`` component of any OpenStack cluster.\n\nTo deploy the Keystone service run the following:\n\n.. code-block:: bash\n\n    helm upgrade --install keystone openstack-helm/keystone \\\n        --namespace=openstack \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c keystone ${FEATURES})\n\n    helm osh wait-for-pods openstack\n\nHeat\n~~~~\n\nOpenStack Heat is an orchestration service that provides templates\nand automation for deploying and managing cloud resources. It enables\nusers to define infrastructure as code, making it easier to create\nand manage complex environments in OpenStack through templates and\nautomation scripts.\n\nHere are the commands for the deployment of Heat service.\n\n.. 
code-block:: bash\n\n    helm upgrade --install heat openstack-helm/heat \\\n        --namespace=openstack \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c heat ${FEATURES})\n\n    helm osh wait-for-pods openstack\n\nGlance\n~~~~~~\n\nOpenStack Glance is the image service component of OpenStack.\nIt manages and catalogs virtual machine images, such as operating\nsystem images and snapshots, making them available for use in\nOpenStack compute instances.\n\nThis is a ``mandatory`` component.\n\nThe Glance deployment commands are as follows:\n\n.. code-block:: bash\n\n    tee ${OVERRIDES_DIR}/glance/glance_pvc_storage.yaml <<EOF\n    storage: pvc\n    volume:\n      class_name: general\n      size: 10Gi\n    EOF\n\n    helm upgrade --install glance openstack-helm/glance \\\n        --namespace=openstack \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c glance glance_pvc_storage ${FEATURES})\n\n    helm osh wait-for-pods openstack\n\n.. note::\n\n    In the above we prepare a values override file for ``glance`` chart which\n    makes it use a Persistent Volume Claim (PVC) for storing images. We put\n    the values in the ``${OVERRIDES_DIR}/glance/glance_pvc_storage.yaml``\n    so the OpenStack-Helm plugin can pick it up if we pass the feature\n    ``glance_pvc_storage`` to it.\n\nCinder\n~~~~~~\n\nOpenStack Cinder is the block storage service component of the\nOpenStack cloud computing platform. It manages and provides persistent\nblock storage to virtual machines, enabling users to attach and detach\npersistent storage volumes to their VMs as needed.\n\nTo deploy the OpenStack Cinder use the following\n\n.. 
code-block:: bash\n\n    helm upgrade --install cinder openstack-helm/cinder \\\n        --namespace=openstack \\\n        --timeout=600s \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c cinder ${FEATURES})\n\n    helm osh wait-for-pods openstack\n\nCompute kit backend: Openvswitch and Libvirt\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nOpenStack-Helm recommends using OpenvSwitch as the networking backend\nfor the OpenStack cloud. OpenvSwitch is a software-based, open-source\nnetworking solution that provides virtual switching capabilities.\n\nTo deploy the OpenvSwitch service use the following:\n\n.. code-block:: bash\n\n    helm upgrade --install openvswitch openstack-helm/openvswitch \\\n        --namespace=openstack \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c openvswitch ${FEATURES})\n\n    helm osh wait-for-pods openstack\n\nLibvirt is a toolkit that provides a common API for managing virtual\nmachines. It is used in OpenStack to interact with hypervisors like\nKVM, QEMU, and Xen.\n\nLet's deploy the Libvirt service using the following command:\n\n.. code-block:: bash\n\n    helm upgrade --install libvirt openstack-helm/libvirt \\\n        --namespace=openstack \\\n        --set conf.ceph.enabled=true \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c libvirt ${FEATURES})\n\n.. note::\n    Here we don't need to run ``helm osh wait-for-pods`` because the Libvirt pods\n    depend on Neutron OpenvSwitch agent pods which are not yet deployed.\n\nCompute kit: Placement, Nova, Neutron\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nOpenStack Placement is a service that helps manage and allocate\nresources in an OpenStack cloud environment. It helps Nova (compute)\nfind and allocate the right resources (CPU, memory, etc.)\nfor virtual machine instances.\n\n.. 
code-block:: bash\n\n    helm upgrade --install placement openstack-helm/placement \\\n        --namespace=openstack \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c placement ${FEATURES})\n\nOpenStack Nova is the compute service responsible for managing\nand orchestrating virtual machines in an OpenStack cloud.\nIt provisions and schedules instances, handles their lifecycle,\nand interacts with underlying hypervisors.\n\n.. code-block:: bash\n\n    helm upgrade --install nova openstack-helm/nova \\\n        --namespace=openstack \\\n        --set bootstrap.wait_for_computes.enabled=true \\\n        --set conf.ceph.enabled=true \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c nova ${FEATURES})\n\nOpenStack Neutron is the networking service that provides network\nconnectivity and enables users to create and manage network resources\nfor their virtual machines and other services.\n\n.. code-block:: bash\n\n    PROVIDER_INTERFACE=<provider_interface_name>\n    tee ${OVERRIDES_DIR}/neutron/neutron_simple.yaml << EOF\n    conf:\n      neutron:\n        DEFAULT:\n          l3_ha: False\n          max_l3_agents_per_router: 1\n      # <provider_interface_name> will be attached to the br-ex bridge.\n      # The IP assigned to the interface will be moved to the bridge.\n      auto_bridge_add:\n        br-ex: ${PROVIDER_INTERFACE}\n      plugins:\n        ml2_conf:\n          ml2_type_flat:\n            flat_networks: public\n        openvswitch_agent:\n          ovs:\n            bridge_mappings: public:br-ex\n    EOF\n\n    helm upgrade --install neutron openstack-helm/neutron \\\n        --namespace=openstack \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c neutron neutron_simple ${FEATURES})\n\n    helm osh wait-for-pods openstack\n\nHorizon\n~~~~~~~\n\nOpenStack Horizon is the web application that is intended to provide a graphic\nuser interface to Openstack services.\n\nLet's deploy it:\n\n.. 
code-block:: bash\n\n    helm upgrade --install horizon openstack-helm/horizon \\\n        --namespace=openstack \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c horizon ${FEATURES})\n\n    helm osh wait-for-pods openstack\n\nOpenStack client\n----------------\n\nInstalling the OpenStack client on the developer's machine is a vital step.\nThe easiest way to install the OpenStack client is to create a Python\nvirtual environment and install the client using ``pip``.\n\n.. code-block:: bash\n\n    python3 -m venv ~/openstack-client\n    source ~/openstack-client/bin/activate\n    pip install python-openstackclient\n\nNow let's prepare the OpenStack client configuration file:\n\n.. code-block:: bash\n\n    mkdir -p ~/.config/openstack\n    tee ~/.config/openstack/clouds.yaml << EOF\n    clouds:\n      openstack_helm:\n        region_name: RegionOne\n        identity_api_version: 3\n        auth:\n          username: 'admin'\n          password: 'password'\n          project_name: 'admin'\n          project_domain_name: 'default'\n          user_domain_name: 'default'\n          auth_url: 'http://keystone.openstack.svc.cluster.local/v3'\n\nThat is it! Now you can use the OpenStack client. Try to run this:\n\n.. code-block:: bash\n\n    openstack --os-cloud openstack_helm endpoint list\n\n.. note::\n\n    In some cases it is more convenient to use the OpenStack client\n    inside a Docker container. OpenStack-Helm provides the\n    `quay.io/airshipit/openstack-client`_ image. The below is an example\n    of how to use it.\n\n\n.. code-block:: bash\n\n    docker run -it --rm --network host \\\n        -v ~/.config/openstack/clouds.yaml:/etc/openstack/clouds.yaml \\\n        -e OS_CLOUD=openstack_helm \\\n        quay.io/airshipit/openstack-client:${OPENSTACK_RELEASE}-ubuntu_jammy \\\n        openstack endpoint list\n\nRemember that the container file system is ephemeral and is destroyed\nwhen you stop the container. 
So if you would like to use the\nOpenstack client capabilities interfacing with the file system then you have to mount\na directory from the host file system where necessary files are located.\nFor example, this is useful when you create a key pair and save the private key in a file\nwhich is then used for ssh access to VMs. Or it could be Heat templates\nwhich you prepare in advance and then use with Openstack client.\n\nFor convenience, you can create an executable entry point that runs the\nOpenstack client in a Docker container. See for example `setup-client.sh`_.\n\n.. _setup-client.sh: https://opendev.org/openstack/openstack-helm/src/branch/master/tools/deployment/common/setup-client.sh\n.. _quay.io/airshipit/openstack-client: https://quay.io/repository/airshipit/openstack-client?tab=tags&tag=latest\n\n\nOther Openstack components (optional)\n-------------------------------------\n\nBarbican\n~~~~~~~~\n\nOpenStack Barbican is a component within the OpenStack ecosystem that\nprovides secure storage, provisioning, and management of secrets,\nsuch as encryption keys, certificates, and passwords.\n\nIf you want other OpenStack services to use Barbican for secret management,\nyou'll need to reconfigure those services to integrate with Barbican.\nEach OpenStack service has its own configuration settings\nthat need to be updated.\n\n.. code-block:: bash\n\n    helm upgrade --install barbican openstack-helm/barbican \\\n        --namespace=openstack \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c barbican ${FEATURES})\n\n    helm osh wait-for-pods openstack\n\nTacker\n~~~~~~\n\nTacker is an OpenStack service for NFV Orchestration with a general\npurpose VNF Manager to deploy and operate Virtual Network Functions\n(VNFs) and Network Services on an NFV Platform. It is based on ETSI MANO\nArchitectural Framework and provides OpenStack's NFV Orchestration API.\n\n.. 
note::\n\n    Barbican must be installed before Tacker, as it is a necessary component for\n    Tacker's installation.\n\nTo deploy the OpenStack Tacker, use the following:\n\n.. code-block:: bash\n\n    helm upgrade --install tacker openstack-helm/tacker \\\n        --namespace=openstack \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c tacker ${FEATURES})\n\n    helm osh wait-for-pods openstack\n\nFor comprehensive instructions on installing Tacker using Openstack Helm,\nplease refer `Install Tacker via Openstack Helm`_.\n\n.. _Install Tacker via Openstack Helm: https://docs.openstack.org/tacker/latest/install/openstack_helm.html\n\nTrove\n~~~~~\n\nOpenStack Trove is the Database as a Service (DBaaS) component of the\nOpenStack cloud computing platform. It provides scalable and reliable\ncloud database services, allowing users to provision and manage database\ninstances without the complexity of handling database administration tasks.\nTrove supports multiple database engines including MySQL, PostgreSQL,\nMongoDB, and others.\n\nTo deploy the OpenStack Trove use the following\n\n.. code-block:: bash\n\n    helm upgrade --install trove openstack-helm/trove \\\n        --namespace=openstack \\\n        --timeout=600s \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c trove ${FEATURES})\n\n    helm osh wait-for-pods openstack\n\nBlazar\n~~~~~~\n\nBlazar is the resource reservation service for OpenStack. It provides a way to reserve\nresources such as compute hosts, servers and floating IPs for future use.\n\nTo deploy the Blazar service run the following:\n\n.. 
code-block:: bash\n\n    helm upgrade --install blazar openstack-helm/blazar \\\n        --namespace=openstack \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c blazar ${FEATURES})\n\n    helm osh wait-for-pods openstack\n\nFreezer\n~~~~~~~\n\nFreezer is a disaster recovery and backup-as-a-service component for OpenStack.\nIt provides a way to back up various resources, such as virtual machine instances,\ndatabases, and file systems.\n\nIt allows users to schedule backups, restore data, and manage the lifecycle of their\nbackups to ensure data protection and business continuity within an OpenStack cloud.\n\nTo deploy the OpenStack Freezer, use the following:\n\n.. code-block:: bash\n\n    helm upgrade --install freezer openstack-helm/freezer \\\n        --namespace=openstack \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c freezer ${FEATURES})\n\nZaqar\n~~~~~\n\nZaqar is the messaging service for OpenStack. It provides a multi-tenant, RESTful and\nWebSocket-based message queue service that allows applications and services to communicate\nasynchronously.\n\nTo deploy the Zaqar service use the following:\n\n.. code-block:: bash\n\n    helm upgrade --install zaqar openstack-helm/zaqar \\\n        --namespace=openstack \\\n        $(helm osh get-values-overrides -p ${OVERRIDES_DIR} -c zaqar ${FEATURES})\n\n    helm osh wait-for-pods openstack\n"
  },
  {
    "path": "doc/source/install/prerequisites.rst",
    "content": "Kubernetes prerequisites\n========================\n\nGateway API\n-----------\n\nThe `Kubernetes Gateway API`_ is the recommended way to expose OpenStack\nservices externally. It provides an expressive and extensible routing model.\nWe recommend using `Envoy Gateway`_ as the Gateway API implementation.\n\nBelow we describe how we deploy the Gateway API and Envoy Gateway in the cluster on\ntest clusters.\n\nFirst, install the Gateway API CRDs and `Envoy Gateway`_:\n\n.. code-block:: bash\n\n    helm install eg oci://docker.io/envoyproxy/gateway-helm \\\n      --version v1.7.0 \\\n      --namespace envoy-gateway-system \\\n      --create-namespace\n\nNext, create the ``EnvoyProxy`` custom resource, the ``Gateway``.\nThe ``EnvoyProxy`` tells Envoy Gateway\nhow to configure the data-plane pods and the ``LoadBalancer`` service\n(backed by MetalLB, see below). The ``Gateway`` defines the listener that\naccepts traffic. The gateway controller will automatically create a\n``LoadBalancer`` service.\n\n.. 
code-block:: bash\n\n    GATEWAY_IP=<metallb_ip_for_gateway>\n\n    tee > /tmp/gatewayapi_envoy_default.yaml <<EOF\n    ---\n    apiVersion: gateway.envoyproxy.io/v1alpha1\n    kind: EnvoyProxy\n    metadata:\n      name: gateway-proxy-default\n      namespace: envoy-gateway-system\n    spec:\n      provider:\n        type: Kubernetes\n        kubernetes:\n          envoyService:\n            type: LoadBalancer\n            externalTrafficPolicy: Cluster\n            annotations:\n              metallb.universe.tf/loadBalancerIPs: \"${GATEWAY_IP}\"\n            patch:\n              type: StrategicMerge\n              value:\n                spec:\n                  externalTrafficPolicy: Cluster\n    ---\n    apiVersion: gateway.networking.k8s.io/v1\n    kind: Gateway\n    metadata:\n      name: gateway-default\n      namespace: envoy-gateway-system\n    spec:\n      gatewayClassName: default\n      infrastructure:\n        parametersRef:\n          group: gateway.envoyproxy.io\n          kind: EnvoyProxy\n          name: gateway-proxy-default\n      listeners:\n        - name: http\n          protocol: HTTP\n          port: 80\n          allowedRoutes:\n            namespaces:\n              from: All\n    EOF\n    kubectl apply -f /tmp/gatewayapi_envoy_default.yaml\n\nKubernetes workloads that need to reach public OpenStack endpoints\n(e.g. Octavia workers calling the Nova API) send requests to the cluster\nCoredns service which by default forwards requests to the DNS server\nconfigured in the /etc/resolv.conf file on the cluster nodes. We configure\nthe cluster nodes to use the Dnsmasq running on the control plane node\nas the default nameserver. The Dnsmasq is configured to resolve\nthe Openstack public names to the LB IP.\n\nHow exactly users expose their workloads may vary. ``HTTPRoute`` objects\ncan be added to any OpenStack-Helm chart via\nthe ``.Values.extraObjects`` field. For a complete example see\n``values_overrides/nova/gateway.yaml``.\n\n.. 
_Kubernetes Gateway API: https://gateway-api.sigs.k8s.io/\n.. _Envoy Gateway: https://gateway.envoyproxy.io/\n\n.. image:: OSH_GatewayAPI.svg\n    :width: 100%\n    :align: center\n    :alt: Gateway API traffic flow\n\n.. note::\n    Legacy ``Ingress`` resources are still supported. If you prefer to use an\n    ingress controller instead of the Gateway API, deploy one in the\n    ``openstack`` namespace with pods labeled ``app: ingress-api``.\n\n\nMetalLB\n-------\n\nMetalLB is a load-balancer for bare metal Kubernetes clusters leveraging\nL2/L3 protocols. This is a popular way of exposing the web\napplications running in Kubernetes to the external world.\n\nThe following commands can be used to deploy MetalLB:\n\n.. code-block:: bash\n\n    tee > /tmp/metallb_system_namespace.yaml <<EOF\n    apiVersion: v1\n    kind: Namespace\n    metadata:\n      name: metallb-system\n    EOF\n    kubectl apply -f /tmp/metallb_system_namespace.yaml\n\n    helm repo add metallb https://metallb.github.io/metallb\n    helm install metallb metallb/metallb -n metallb-system\n\nNow it is necessary to configure the MetalLB IP address pool and the IP address\nadvertisement. The MetalLB custom resources are used for this:\n\n.. 
code-block:: bash\n\n    tee > /tmp/metallb_ipaddresspool.yaml <<EOF\n    ---\n    apiVersion: metallb.io/v1beta1\n    kind: IPAddressPool\n    metadata:\n        name: public\n        namespace: metallb-system\n    spec:\n        addresses:\n        - \"172.24.128.0/24\"\n    EOF\n\n    kubectl apply -f /tmp/metallb_ipaddresspool.yaml\n\n    tee > /tmp/metallb_l2advertisement.yaml <<EOF\n    ---\n    apiVersion: metallb.io/v1beta1\n    kind: L2Advertisement\n    metadata:\n        name: public\n        namespace: metallb-system\n    spec:\n        ipAddressPools:\n        - public\n    EOF\n\n    kubectl apply -f /tmp/metallb_l2advertisement.yaml\n\nNext, let's create a service of type ``LoadBalancer`` which will be the\npublic endpoint for all OpenStack services that we will later deploy.\nThe MetalLB will assign an IP address to it (we can assign a dedicated\nIP using annotations):\n\n.. code-block:: bash\n\n    tee > /tmp/openstack_endpoint_service.yaml <<EOF\n    ---\n    kind: Service\n    apiVersion: v1\n    metadata:\n      name: public-openstack\n      namespace: openstack\n      annotations:\n        metallb.universe.tf/loadBalancerIPs: \"172.24.128.100\"\n    spec:\n      externalTrafficPolicy: Cluster\n      type: LoadBalancer\n      selector:\n        app: ingress-api\n      ports:\n        - name: http\n          port: 80\n        - name: https\n          port: 443\n    EOF\n\n    kubectl apply -f /tmp/openstack_endpoint_service.yaml\n\nThis service will redirect the traffic to the ingress controller pods\n(see the ``app: ingress-api`` selector). OpenStack-Helm charts create\n``Ingress`` resources which are used by the ingress controller to configure the\nreverse proxy backend so that the traffic eventually goes to particular\nOpenStack API pods.\n\nBy default, the ``Ingress`` objects will only contain rules for the\n``openstack.svc.cluster.local`` DNS domain. 
This is the internal Kubernetes domain\nand it is not supposed to be used outside the cluster.\n\nYou can use the ``host_fqdn_override`` for the endpoints to set an alternate\nhostname using a service like `sslip.io`_. Assuming your services are exposed\nat ``172.24.128.100`` as is referenced in the Service above, you could use\n``<service>.172-24-128-100.sslip.io``\n\nHere is an example of how to set the ``host_fqdn_override`` for the Keystone chart:\n\n.. code-block:: yaml\n\n    endpoints:\n      identity:\n        host_fqdn_override:\n          public:\n            host: \"keystone.172-24-128-100.sslip.io\"\n\n.. note::\n    In production environments you probably choose to use a different DNS\n    domain for public OpenStack endpoints. This is easy to achieve by setting\n    the necessary chart values. All Openstack-Helm charts values have the\n    ``endpoints`` section where you can specify the ``host_fqdn_override``.\n    In this case a chart will create additional ``Ingress`` resources to\n    handle the external domain name and also the Keystone endpoint catalog\n    will be updated.\n\n.. _sslip.io: https://sslip.io/\n\nCeph\n----\n\nCeph is a highly scalable and fault-tolerant distributed storage\nsystem. It offers object storage, block storage, and\nfile storage capabilities, making it a versatile solution for\nvarious storage needs.\n\nKubernetes CSI (Container Storage Interface) allows storage providers\nlike Ceph to implement their drivers, so that Kubernetes can\nuse the CSI driver to provision and manage volumes which can be\nused by stateful applications deployed on top of Kubernetes\nto store their data. In the context of OpenStack running in Kubernetes,\nthe Ceph is used as a storage backend for services like MariaDB, RabbitMQ and\nother services that require persistent storage. 
By default OpenStack-Helm\nstateful sets expect to find a storage class named **general**.\n\nAt the same time, Ceph provides the RBD API, which applications\ncan utilize directly to create and mount block devices distributed across\nthe Ceph cluster. For example the OpenStack Cinder utilizes this Ceph\ncapability to offer persistent block devices to virtual machines\nmanaged by the OpenStack Nova.\n\nThe recommended way to manage Ceph on top of Kubernetes is by means\nof the `Rook`_ operator. The Rook project provides the Helm chart\nto deploy the Rook operator which extends the Kubernetes API\nadding CRDs that enable managing Ceph clusters via Kubernetes custom objects.\nThere is also another Helm chart that facilitates deploying Ceph clusters\nusing Rook custom resources.\n\nFor details please refer to the `Rook`_ documentation and the `charts`_.\n\n.. note::\n    The following script `ceph-rook.sh`_ (recommended for testing only) can be used as\n    an example of how to deploy the Rook Ceph operator and a Ceph cluster using the\n    Rook `charts`_. Please note that the script places Ceph OSDs on loopback devices\n    which is **not recommended** for production. The loopback devices must exist before\n    using this script.\n\nOnce the Ceph cluster is deployed, the next step is to enable using it\nfor services deployed by OpenStack-Helm charts. The ``ceph-adapter-rook`` chart\nprovides the necessary functionality to do this. The chart will\nprepare Kubernetes secret resources containing Ceph client keys/configs\nthat are later used to interface with the Ceph cluster.\n\nHere we assume the Ceph cluster is deployed in the ``ceph`` namespace.\n\n.. code-block:: bash\n\n    helm upgrade --install ceph-adapter-rook openstack-helm/ceph-adapter-rook \\\n        --namespace=openstack\n\n    helm osh wait-for-pods openstack\n\n.. _Rook: https://rook.io/\n.. _charts: https://rook.io/docs/rook/latest-release/Helm-Charts/helm-charts/\n.. 
_ceph-rook.sh: https://opendev.org/openstack/openstack-helm/src/branch/master/tools/deployment/ceph/ceph-rook.sh\n\nNode labels\n-----------\n\nOpenstack-Helm charts rely on Kubernetes node labels to determine which nodes\nare suitable for running specific OpenStack components.\n\nThe following sets labels on all the Kubernetes nodes in the cluster\nincluding control plane nodes but you can choose to label only a subset of nodes\nwhere you want to run OpenStack:\n\n.. code-block::\n\n    kubectl label --overwrite nodes --all openstack-control-plane=enabled\n    kubectl label --overwrite nodes --all openstack-compute-node=enabled\n    kubectl label --overwrite nodes --all openvswitch=enabled\n    kubectl label --overwrite nodes --all linuxbridge=enabled\n\n.. note::\n    The control plane nodes are tainted by default to prevent scheduling\n    of pods on them. You can untaint the control plane nodes using the following command:\n\n.. code-block:: bash\n\n    kubectl taint nodes -l 'node-role.kubernetes.io/control-plane' node-role.kubernetes.io/control-plane-\n"
  },
  {
    "path": "doc/source/logging/elasticsearch.rst",
    "content": "Elasticsearch\n=============\n\nThe Elasticsearch chart in openstack-helm provides a distributed data\nstore to index and analyze logs generated from the OpenStack-Helm services.\nThe chart contains templates for:\n\n- Elasticsearch client nodes\n- Elasticsearch data nodes\n- Elasticsearch master nodes\n- An Elasticsearch exporter for providing cluster metrics to Prometheus\n- A cronjob for Elastic Curator to manage data indices\n\nAuthentication\n--------------\n\nThe Elasticsearch deployment includes a sidecar container that runs an Apache\nreverse proxy to add authentication capabilities for Elasticsearch.  The\nusername and password are configured under the Elasticsearch entry in the\nendpoints section of the chart's values.yaml.\n\nThe configuration for Apache can be found under the conf.httpd key, and uses a\nhelm-toolkit function that allows for including gotpl entries in the template\ndirectly.  This allows the use of other templates, like the endpoint lookup\nfunction templates, directly in the configuration for Apache.\n\nElasticsearch Service Configuration\n-----------------------------------\n\nThe Elasticsearch service configuration file can be modified with a combination\nof pod environment variables and entries in the values.yaml file.  
Elasticsearch\ndoes not require much configuration out of the box, and the default values for\nthese configuration settings are meant to provide a highly available cluster by\ndefault.\n\nThe vital entries in this configuration file are:\n\n- path.data:  The path at which to store the indexed data\n- path.repo:  The location of any snapshot repositories to backup indexes\n- bootstrap.memory_lock:  Ensures none of the JVM is swapped to disk\n- discovery.zen.minimum_master_nodes:  Minimum required masters for the cluster\n\nThe bootstrap.memory_lock entry ensures none of the JVM will be swapped to disk\nduring execution, and setting this value to false will negatively affect the\nhealth of your Elasticsearch nodes.  The discovery.zen.minimum_master_nodes flag\nregisters the minimum number of masters required for your Elasticsearch cluster\nto register as healthy and functional.\n\nTo read more about Elasticsearch's configuration file, please see the official\ndocumentation_.\n\n.. _documentation: https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html\n\nElastic Curator\n---------------\n\nThe Elasticsearch chart contains a cronjob to run Elastic Curator at specified\nintervals to manage the lifecycle of your indices.  Curator can perform:\n\n- Take and send a snapshot of your indexes to a specified snapshot repository\n- Delete indexes older than a specified length of time\n- Restore indexes with previous index snapshots\n- Reindex an index into a new or preexisting index\n\nThe full list of supported Curator actions can be found in the actions_ section of\nthe official Curator documentation.  The list of options available for those\nactions can be found in the options_ section of the Curator documentation.\n\n.. _actions: https://www.elastic.co/guide/en/elasticsearch/client/curator/current/actions.html\n.. 
_options: https://www.elastic.co/guide/en/elasticsearch/client/curator/current/options.html\n\nCurator's configuration is handled via entries in Elasticsearch's values.yaml\nfile and must be overridden to achieve your index lifecycle management\nneeds.  Please note that any unused field should be left blank, as an entry of\n\"None\" will result in an exception, as Curator will read it as a Python NoneType\ninstead of a value of None.\n\nThe section for Curator's service configuration can be found at:\n\n::\n\n    conf:\n      curator:\n        config:\n          client:\n            hosts:\n              - elasticsearch-logging\n            port: 9200\n            url_prefix:\n            use_ssl: False\n            certificate:\n            client_cert:\n            client_key:\n            ssl_no_validate: False\n            http_auth:\n            timeout: 30\n            master_only: False\n          logging:\n            loglevel: INFO\n            logfile:\n            logformat: default\n            blacklist: ['elasticsearch', 'urllib3']\n\nCurator's actions are configured in the following section:\n\n::\n\n    conf:\n      curator:\n        action_file:\n          actions:\n            1:\n              action: delete_indices\n              description: \"Clean up ES by deleting old indices\"\n              options:\n                timeout_override:\n                continue_if_exception: False\n                ignore_empty_list: True\n                disable_action: True\n              filters:\n              - filtertype: age\n                source: name\n                direction: older\n                timestring: '%Y.%m.%d'\n                unit: days\n                unit_count: 30\n                field:\n                stats_result:\n                epoch:\n                exclude: False\n\nThe Elasticsearch chart contains commented example actions for deleting and\nsnapshotting indexes older than 30 days.  
Please note these actions are provided as a\nreference and are disabled by default to avoid any unexpected behavior against\nyour indexes.\n\nElasticsearch Exporter\n----------------------\n\nThe Elasticsearch chart contains templates for an exporter to provide metrics\nfor Prometheus.  These metrics provide insight into the performance and overall\nhealth of your Elasticsearch cluster.  Please note monitoring for Elasticsearch\nis disabled by default, and must be enabled with the following override:\n\n\n::\n\n    monitoring:\n      prometheus:\n        enabled: true\n\n\nThe Elasticsearch exporter uses the same service annotations as the other\nexporters, and no additional configuration is required for Prometheus to target\nthe Elasticsearch exporter for scraping.  The Elasticsearch exporter is\nconfigured with command line flags, and the flags' default values can be found\nunder the following key in the values.yaml file:\n\n::\n\n    conf:\n      prometheus_elasticsearch_exporter:\n        es:\n          all: true\n          timeout: 20s\n\nThe configuration keys configure the following behaviors:\n\n- es.all:  Gather information from all nodes, not just the connecting node\n- es.timeout:  Timeout for metrics queries\n\nMore information about the Elasticsearch exporter can be found on the exporter's\nGitHub_ page.\n\n.. _GitHub: https://github.com/prometheus-community/elasticsearch_exporter\n\n\nSnapshot Repositories\n---------------------\n\nBefore Curator can store snapshots in a specified repository, Elasticsearch must\nregister the configured repository.  To achieve this, the Elasticsearch chart\ncontains a job for registering an s3 snapshot repository backed by radosgateway.\nThis job is disabled by default as the curator actions for snapshots are\ndisabled by default.  To enable the snapshot job, the\nconf.elasticsearch.snapshots.enabled flag must be set to true.  
The following\nconfiguration keys are relevant:\n\n- conf.elasticsearch.snapshots.enabled: Enable snapshot repositories\n- conf.elasticsearch.snapshots.bucket: Name of the RGW s3 bucket to use\n- conf.elasticsearch.snapshots.repositories: Name of repositories to create\n\nMore information about Elasticsearch repositories can be found in the official\nElasticsearch snapshot_ documentation:\n\n.. _snapshot: https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html#_repositories\n"
  },
  {
    "path": "doc/source/logging/fluent-logging.rst",
    "content": "Fluent-logging\n===============\n\nThe fluent-logging chart in openstack-helm provides the base for a\ncentralized logging platform for OpenStack-Helm.  The chart combines two\nservices, Fluentbit and Fluentd, to gather logs generated by the services,\nfilter on or add metadata to logged events, then forward them to Elasticsearch\nfor indexing.\n\nFluentbit\n---------\n\nFluentbit runs as a log-collecting component on each host in the cluster, and\ncan be configured to target specific log locations on the host.  The Fluentbit_\nconfiguration schema can be found on the official Fluentbit website.\n\n.. _Fluentbit: http://fluentbit.io/documentation/0.12/configuration/schema.html\n\nFluentbit provides a set of plug-ins for ingesting and filtering various log\ntypes.  These plug-ins include:\n\n- Tail:  Tails a defined file for logged events\n- Kube:  Adds Kubernetes metadata to a logged event\n- Systemd:  Provides ability to collect logs from the journald daemon\n- Syslog:  Provides the ability to collect logs from a Unix socket (TCP or UDP)\n\nThe complete list of plugins can be found in the configuration_ section of the\nFluentbit documentation.\n\n.. _configuration: http://fluentbit.io/documentation/current/configuration/\n\nFluentbit uses parsers to turn unstructured log entries into structured entries\nto make processing and filtering events easier.  The two formats supported are\nJSON maps and regular expressions.  More information about Fluentbit's parsing\nabilities can be found in the parsers_ section of Fluentbit's documentation.\n\n.. 
_parsers: http://fluentbit.io/documentation/current/parser/\n\nFluentbit's service and parser configurations are defined via the values.yaml\nfile, which allows for custom definitions of inputs, filters and outputs for\nyour logging needs.\nFluentbit's configuration can be found under the following key:\n\n::\n\n    conf:\n      fluentbit:\n        - service:\n            header: service\n            Flush: 1\n            Daemon: Off\n            Log_Level: info\n            Parsers_File: parsers.conf\n        - containers_tail:\n            header: input\n            Name: tail\n            Tag: kube.*\n            Path: /var/log/containers/*.log\n            Parser: docker\n            DB: /var/log/flb_kube.db\n            Mem_Buf_Limit: 5MB\n        - kube_filter:\n            header: filter\n            Name: kubernetes\n            Match: kube.*\n            Merge_JSON_Log: On\n        - fluentd_output:\n            header: output\n            Name: forward\n            Match: \"*\"\n            Host: ${FLUENTD_HOST}\n            Port: ${FLUENTD_PORT}\n\nFluentbit is configured by default to capture logs at the info log level.  To\nchange this, override the Log_Level key with the appropriate levels, which are\ndocumented in Fluentbit's configuration_.\n\nFluentbit's parser configuration can be found under the following key:\n\n::\n\n    conf:\n      parsers:\n        - docker:\n            header: parser\n            Name: docker\n            Format: json\n            Time_Key: time\n            Time_Format: \"%Y-%m-%dT%H:%M:%S.%L\"\n            Time_Keep: On\n\nThe values for the fluentbit and parsers keys are consumed by a fluent-logging\nhelper template that produces the appropriate configurations for the relevant\nsections.  Each list item (keys prefixed with a '-') represents a section in the\nconfiguration files, and the arbitrary name of the list item should represent a\nlogical description of the section defined.  
The header key represents the type\nof definition (filter, input, output, service or parser), and the remaining\nentries will be rendered as space delimited configuration keys and values. For\nexample, the definitions above would result in the following:\n\n::\n\n    [SERVICE]\n        Daemon false\n        Flush 1\n        Log_Level info\n        Parsers_File parsers.conf\n    [INPUT]\n        DB /var/log/flb_kube.db\n        Mem_Buf_Limit 5MB\n        Name tail\n        Parser docker\n        Path /var/log/containers/*.log\n        Tag kube.*\n    [FILTER]\n        Match kube.*\n        Merge_JSON_Log true\n        Name kubernetes\n    [OUTPUT]\n        Host ${FLUENTD_HOST}\n        Match *\n        Name forward\n        Port ${FLUENTD_PORT}\n    [PARSER]\n        Format json\n        Name docker\n        Time_Format %Y-%m-%dT%H:%M:%S.%L\n        Time_Keep true\n        Time_Key time\n\nFluentd\n-------\n\nFluentd runs as a forwarding service that receives event entries from Fluentbit\nand routes them to the appropriate destination.  By default, Fluentd will route\nall entries received from Fluentbit to Elasticsearch for indexing.  The\nFluentd_ configuration schema can be found at the official Fluentd website.\n\n.. _Fluentd: https://docs.fluentd.org/v0.12/articles/config-file\n\nFluentd's configuration is handled in the values.yaml file in fluent-logging.\nSimilar to Fluentbit, configuration overrides provide flexibility in defining\ncustom routes for tagged log events.  
The configuration can be found under the\nfollowing key:\n\n::\n\n    conf:\n      fluentd:\n        - fluentbit_forward:\n            header: source\n            type: forward\n            port: \"#{ENV['FLUENTD_PORT']}\"\n            bind: 0.0.0.0\n        - elasticsearch:\n            header: match\n            type: elasticsearch\n            expression: \"**\"\n            include_tag_key: true\n            host: \"#{ENV['ELASTICSEARCH_HOST']}\"\n            port: \"#{ENV['ELASTICSEARCH_PORT']}\"\n            logstash_format: true\n            buffer_chunk_limit: 10M\n            buffer_queue_limit: 32\n            flush_interval: \"20\"\n            max_retry_wait: 300\n            disable_retry_limit: \"\"\n\nThe values for the fluentd keys are consumed by a fluent-logging helper template\nthat produces appropriate configurations for each directive desired.  The list\nitems (keys prefixed with a '-') represent sections in the configuration file,\nand the name of each list item should represent a logical description of the\nsection defined.  The header key represents the type of definition (name of the\nfluentd plug-in used), and the expression key is used when the plug-in requires\na pattern to match against (example: matches on certain input patterns).  The\nremaining entries will be rendered as space delimited configuration keys and\nvalues.  For example, the definition above would result in the following:\n\n::\n\n    <source>\n      bind 0.0.0.0\n      port \"#{ENV['FLUENTD_PORT']}\"\n      @type forward\n    </source>\n    <match **>\n      buffer_chunk_limit 10M\n      buffer_queue_limit 32\n      disable_retry_limit\n      flush_interval 20s\n      host \"#{ENV['ELASTICSEARCH_HOST']}\"\n      include_tag_key true\n      logstash_format true\n      max_retry_wait 300\n      port \"#{ENV['ELASTICSEARCH_PORT']}\"\n      @type elasticsearch\n    </match>\n\nSome fluentd plug-ins require nested definitions.  
The fluentd helper template\ncan handle these definitions with the following structure:\n\n::\n\n    conf:\n      td_agent:\n        - fluentbit_forward:\n            header: source\n            type: forward\n            port: \"#{ENV['FLUENTD_PORT']}\"\n            bind: 0.0.0.0\n        - log_transformer:\n            header: filter\n            type: record_transformer\n            expression: \"foo.bar\"\n            inner_def:\n              - record_transformer:\n                  header: record\n                  hostname: my_host\n                  tag: my_tag\n\nIn this example, the my_transformer list will generate a nested configuration\nentry in the log_transformer section.  The nested definitions are handled by\nsupplying a list as the value for an arbitrary key, and the list value will\nindicate the entry should be handled as a nested definition.  The helper\ntemplate will render the above example key/value pairs as the following:\n\n::\n\n    <source>\n      bind 0.0.0.0\n      port \"#{ENV['FLUENTD_PORT']}\"\n      @type forward\n    </source>\n    <filter foo.bar>\n      <record>\n        hostname my_host\n        tag my_tag\n      </record>\n      @type record_transformer\n    </filter>\n\nFluentd Exporter\n----------------------\n\nThe fluent-logging chart contains templates for an exporter to provide metrics\nfor Fluentd.  These metrics provide insight into Fluentd's performance.  Please\nnote monitoring for Fluentd is disabled by default, and must be enabled with the\nfollowing override:\n\n::\n\n    monitoring:\n      prometheus:\n        enabled: true\n\n\nThe Fluentd exporter uses the same service annotations as the other exporters,\nand no additional configuration is required for Prometheus to target the\nFluentd exporter for scraping.  
The Fluentd exporter is configured with command\nline flags, and the flags' default values can be found under the following key\nin the values.yaml file:\n\n::\n\n    conf:\n      fluentd_exporter:\n        log:\n          format: \"logger:stdout?json=true\"\n          level: \"info\"\n\nThe configuration keys configure the following behaviors:\n\n- log.format:  Define the logger used and format of the output\n- log.level:  Log level for the exporter to use\n\nMore information about the Fluentd exporter can be found on the exporter's\nGitHub_ page.\n\n.. _GitHub: https://github.com/V3ckt0r/fluentd_exporter\n"
  },
  {
    "path": "doc/source/logging/index.rst",
    "content": "OpenStack-Helm Logging\n======================\n\nContents:\n\n.. toctree::\n   :maxdepth: 2\n\n   elasticsearch\n   fluent-logging\n   kibana\n"
  },
  {
    "path": "doc/source/logging/kibana.rst",
    "content": "Kibana\n======\n\nThe Kibana chart in OpenStack-Helm Infra provides visualization for logs indexed\ninto Elasticsearch.  These visualizations provide the means to view logs captured\nfrom services deployed in cluster and targeted for collection by Fluentbit.\n\nAuthentication\n--------------\n\nThe Kibana deployment includes a sidecar container that runs an Apache reverse\nproxy to add authentication capabilities for Kibana.  The username and password\nare configured under the Kibana entry in the endpoints section of the chart's\nvalues.yaml.\n\nThe configuration for Apache can be found under the conf.httpd key, and uses a\nhelm-toolkit function that allows for including gotpl entries in the template\ndirectly.  This allows the use of other templates, like the endpoint lookup\nfunction templates, directly in the configuration for Apache.\n\nConfiguration\n-------------\n\nKibana's configuration is driven by the chart's values.yaml file.  The configuration\noptions are found under the following keys:\n\n::\n\n    conf:\n      elasticsearch:\n        pingTimeout: 1500\n        preserveHost: true\n        requestTimeout: 30000\n        shardTimeout: 0\n        startupTimeout: 5000\n      i18n:\n        defaultLocale: en\n      kibana:\n        defaultAppId: discover\n        index: .kibana\n      logging:\n        quiet: false\n        silent: false\n        verbose: false\n      ops:\n        interval: 5000\n      server:\n        host: localhost\n        maxPayloadBytes: 1048576\n        port: 5601\n        ssl:\n          enabled: false\n\nThe case of the sub-keys is important as these values are injected into\nKibana's configuration configmap with the toYaml function.  More information on\nthe configuration options and available settings can be found in the official\nKibana documentation_.\n\n.. _documentation: https://www.elastic.co/guide/en/kibana/current/settings.html\n\nInstallation\n------------\n\n.. 
code-block:: bash\n\n    helm install --namespace=<namespace> local/kibana --name=kibana\n\nSetting Time Field\n------------------\n\nFor Kibana to successfully read the logs from Elasticsearch's indexes, the time\nfield will need to be manually set after Kibana has successfully deployed.  Upon\nvisiting the Kibana dashboard for the first time, a prompt will appear to choose the\ntime field with a drop down menu.  The default time field for Elasticsearch indexes\nis '@timestamp'.  Once this field is selected, the default view for querying log entries\ncan be found by selecting the \"Discover\" tab.\n"
  },
  {
    "path": "doc/source/monitoring/grafana.rst",
    "content": "Grafana\n=======\n\nThe Grafana chart in OpenStack-Helm Infra provides default dashboards for the\nmetrics gathered with Prometheus.  The default dashboards include visualizations\nfor metrics on: Ceph, Kubernetes, nodes, containers, MySQL, RabbitMQ, and\nOpenStack.\n\nConfiguration\n-------------\n\nGrafana\n~~~~~~~\n\nGrafana's configuration is driven with the chart's values.yaml file, and the\nrelevant configuration entries are under the following key:\n\n::\n\n    conf:\n      grafana:\n        paths:\n        server:\n        database:\n        session:\n        security:\n        users:\n        log:\n        log.console:\n        dashboards.json:\n        grafana_net:\n\nThese keys correspond to sections in the grafana.ini configuration file, and the\nto_ini helm-toolkit function will render these values into the appropriate\nformat in grafana.ini.  The list of options for these keys can be found in the\nofficial Grafana configuration_ documentation.\n\n.. _configuration: https://grafana.com/docs/installation/configuration/\n\nPrometheus Data Source\n~~~~~~~~~~~~~~~~~~~~~~\n\nGrafana requires configured data sources for gathering metrics for display in\nits dashboards.  The configuration options for datasources are found under the\nfollowing key in Grafana's values.yaml file:\n\n::\n\n    conf:\n      provisioning:\n        datasources:\n          monitoring:\n            name: prometheus\n            type: prometheus\n            access: proxy\n            orgId: 1\n            editable: true\n            basicAuth: true\n\nThe Grafana chart will use the keys under each entry beneath\n.conf.provisioning.datasources as inputs to a helper template that will render\nthe appropriate configuration for the data source.  
The key for each data source\n(monitoring in the above example) should map to an entry in the endpoints\nsection in the chart's values.yaml, as the data source's URL and authentication\ncredentials will be populated by the values defined in the defined endpoint.\n\n.. _sources: https://grafana.com/docs/features/datasources/\n\nDashboards\n~~~~~~~~~~\n\nGrafana adds dashboards during installation with dashboards defined in YAML under\nthe following key:\n\n::\n\n    conf:\n      dashboards:\n\n\nThese YAML definitions are transformed to JSON and added to Grafana's\nconfiguration configmap and mounted to the Grafana pods dynamically, allowing for\nflexibility in defining and adding custom dashboards to Grafana.  Dashboards can\nbe added by inserting a new key along with a YAML dashboard definition as the\nvalue.  Additional dashboards can be found by searching on Grafana's dashboards_\npage or you can define your own. A json-to-YAML tool, such as json2yaml_ , will\nhelp transform any custom or new dashboards from JSON to YAML.\n\n.. _json2yaml: https://www.json2yaml.com/\n.. _dashboards: https://grafana.com/grafana/dashboards/\n"
  },
  {
    "path": "doc/source/monitoring/index.rst",
    "content": "OpenStack-Helm Monitoring\n=========================\n\nContents:\n\n.. toctree::\n   :maxdepth: 2\n\n   grafana\n   prometheus\n   nagios\n"
  },
  {
    "path": "doc/source/monitoring/nagios.rst",
    "content": "Nagios\n======\n\nThe Nagios chart in openstack-helm can be used to provide an alarming\nservice that's tightly coupled to an OpenStack-Helm deployment.  The Nagios\nchart uses a custom Nagios core image that includes plugins developed to query\nPrometheus directly for scraped metrics and triggered alarms, query the Ceph\nmanager endpoints directly to determine the health of a Ceph cluster, and to\nquery Elasticsearch for logged events that meet certain criteria (experimental).\n\nAuthentication\n--------------\n\nThe Nagios deployment includes a sidecar container that runs an Apache reverse\nproxy to add authentication capabilities for Nagios.  The username and password\nare configured under the nagios entry in the endpoints section of the chart's\nvalues.yaml.\n\nThe configuration for Apache can be found under the conf.httpd key, and uses a\nhelm-toolkit function that allows for including gotpl entries in the template\ndirectly.  This allows the use of other templates, like the endpoint lookup\nfunction templates, directly in the configuration for Apache.\n\nImage Plugins\n-------------\n\nThe Nagios image used contains custom plugins that can be used for the defined\nservice check commands.  These plugins include:\n\n- check_prometheus_metric.py: Query Prometheus for a specific metric and value\n- check_exporter_health_metric.sh: Nagios plugin to query prometheus exporter\n- check_rest_get_api.py: Check REST API status\n- check_update_prometheus_hosts.py: Queries Prometheus, updates Nagios config\n- query_prometheus_alerts.py: Nagios plugin to query prometheus ALERTS metric\n\nMore information about the Nagios image and plugins can be found here_.\n\n.. 
_here: https://github.com/att-comdev/nagios\n\n\nNagios Service Configuration\n----------------------------\n\nThe Nagios service is configured via the following section in the chart's\nvalues file:\n\n::\n\n    conf:\n      nagios:\n        nagios:\n          log_file: /opt/nagios/var/log/nagios.log\n          cfg_file:\n            - /opt/nagios/etc/nagios_objects.cfg\n            - /opt/nagios/etc/objects/commands.cfg\n            - /opt/nagios/etc/objects/contacts.cfg\n            - /opt/nagios/etc/objects/timeperiods.cfg\n            - /opt/nagios/etc/objects/templates.cfg\n            - /opt/nagios/etc/objects/prometheus_discovery_objects.cfg\n          object_cache_file: /opt/nagios/var/objects.cache\n          precached_object_file: /opt/nagios/var/objects.precache\n          resource_file: /opt/nagios/etc/resource.cfg\n          status_file: /opt/nagios/var/status.dat\n          status_update_interval: 10\n          nagios_user: nagios\n          nagios_group: nagios\n          check_external_commands: 1\n          command_file: /opt/nagios/var/rw/nagios.cmd\n          lock_file: /var/run/nagios.lock\n          temp_file: /opt/nagios/var/nagios.tmp\n          temp_path: /tmp\n          event_broker_options: -1\n          log_rotation_method: d\n          log_archive_path: /opt/nagios/var/log/archives\n          use_syslog: 1\n          log_service_retries: 1\n          log_host_retries: 1\n          log_event_handlers: 1\n          log_initial_states: 0\n          log_current_states: 1\n          log_external_commands: 1\n          log_passive_checks: 1\n          service_inter_check_delay_method: s\n          max_service_check_spread: 30\n          service_interleave_factor: s\n          host_inter_check_delay_method: s\n          max_host_check_spread: 30\n          max_concurrent_checks: 60\n          check_result_reaper_frequency: 10\n          max_check_result_reaper_time: 30\n          check_result_path: /opt/nagios/var/spool/checkresults\n          
max_check_result_file_age: 3600\n          cached_host_check_horizon: 15\n          cached_service_check_horizon: 15\n          enable_predictive_host_dependency_checks: 1\n          enable_predictive_service_dependency_checks: 1\n          soft_state_dependencies: 0\n          auto_reschedule_checks: 0\n          auto_rescheduling_interval: 30\n          auto_rescheduling_window: 180\n          service_check_timeout: 60\n          host_check_timeout: 60\n          event_handler_timeout: 60\n          notification_timeout: 60\n          ocsp_timeout: 5\n          perfdata_timeout: 5\n          retain_state_information: 1\n          state_retention_file: /opt/nagios/var/retention.dat\n          retention_update_interval: 60\n          use_retained_program_state: 1\n          use_retained_scheduling_info: 1\n          retained_host_attribute_mask: 0\n          retained_service_attribute_mask: 0\n          retained_process_host_attribute_mask: 0\n          retained_process_service_attribute_mask: 0\n          retained_contact_host_attribute_mask: 0\n          retained_contact_service_attribute_mask: 0\n          interval_length: 1\n          check_workers: 4\n          check_for_updates: 1\n          bare_update_check: 0\n          use_aggressive_host_checking: 0\n          execute_service_checks: 1\n          accept_passive_service_checks: 1\n          execute_host_checks: 1\n          accept_passive_host_checks: 1\n          enable_notifications: 1\n          enable_event_handlers: 1\n          process_performance_data: 0\n          obsess_over_services: 0\n          obsess_over_hosts: 0\n          translate_passive_host_checks: 0\n          passive_host_checks_are_soft: 0\n          check_for_orphaned_services: 1\n          check_for_orphaned_hosts: 1\n          check_service_freshness: 1\n          service_freshness_check_interval: 60\n          check_host_freshness: 0\n          host_freshness_check_interval: 60\n          additional_freshness_latency: 15\n       
   enable_flap_detection: 1\n          low_service_flap_threshold: 5.0\n          high_service_flap_threshold: 20.0\n          low_host_flap_threshold: 5.0\n          high_host_flap_threshold: 20.0\n          date_format: us\n          use_regexp_matching: 1\n          use_true_regexp_matching: 0\n          daemon_dumps_core: 0\n          use_large_installation_tweaks: 0\n          enable_environment_macros: 0\n          debug_level: 0\n          debug_verbosity: 1\n          debug_file: /opt/nagios/var/nagios.debug\n          max_debug_file_size: 1000000\n          allow_empty_hostgroup_assignment: 1\n          illegal_macro_output_chars: \"`~$&|'<>\\\"\"\n\nNagios CGI Configuration\n------------------------\n\nThe Nagios CGI configuration is defined via the following section in the chart's\nvalues file:\n\n::\n\n    conf:\n      nagios:\n        cgi:\n          main_config_file: /opt/nagios/etc/nagios.cfg\n          physical_html_path: /opt/nagios/share\n          url_html_path: /nagios\n          show_context_help: 0\n          use_pending_states: 1\n          use_authentication: 0\n          use_ssl_authentication: 0\n          authorized_for_system_information: \"*\"\n          authorized_for_configuration_information: \"*\"\n          authorized_for_system_commands: nagiosadmin\n          authorized_for_all_services: \"*\"\n          authorized_for_all_hosts: \"*\"\n          authorized_for_all_service_commands: \"*\"\n          authorized_for_all_host_commands: \"*\"\n          default_statuswrl_layout: 4\n          ping_syntax: /bin/ping -n -U -c 5 $HOSTADDRESS$\n          refresh_rate: 90\n          result_limit: 100\n          escape_html_tags: 1\n          action_url_target: _blank\n          notes_url_target: _blank\n          lock_author_names: 1\n          navbar_search_for_addresses: 1\n          navbar_search_for_aliases: 1\n\nNagios Host Configuration\n-------------------------\n\nThe Nagios chart includes a single host definition for the 
Prometheus instance\nqueried for metrics.  The host definition can be found under the following\nvalues key:\n\n::\n\n    conf:\n      nagios:\n        hosts:\n          - prometheus:\n              use: linux-server\n              host_name: prometheus\n              alias: \"Prometheus Monitoring\"\n              address: 127.0.0.1\n              hostgroups: prometheus-hosts\n              check_command: check-prometheus-host-alive\n\nThe address for the Prometheus host is defined by the PROMETHEUS_SERVICE\nenvironment variable in the deployment template, which is determined by the\nmonitoring entry in the Nagios chart's endpoints section.  The endpoint is then\navailable as a macro for Nagios to use in all Prometheus based queries.  For\nexample:\n\n::\n\n    - check_prometheus_host_alive:\n        command_name: check-prometheus-host-alive\n        command_line: \"$USER1$/check_rest_get_api.py --url $USER2$ --warning_response_seconds 5 --critical_response_seconds 10\"\n\nThe $USER2$ macro above corresponds to the Prometheus endpoint defined in the\nPROMETHEUS_SERVICE environment variable.  All checks that use the\nprometheus-hosts hostgroup will map back to the Prometheus host defined by this\nendpoint.\n\nNagios HostGroup Configuration\n------------------------------\n\nThe Nagios chart includes configuration values for defined host groups under the\nfollowing values key:\n\n::\n\n    conf:\n      nagios:\n        host_groups:\n          - prometheus-hosts:\n              hostgroup_name: prometheus-hosts\n              alias: \"Prometheus Virtual Host\"\n          - base-os:\n              hostgroup_name: base-os\n              alias: \"base-os\"\n\nThese hostgroups are used to define which group of hosts should be targeted by\na particular nagios check.  
An example of a check that targets Prometheus for a\nspecific metric query would be:\n\n::\n\n    - check_ceph_monitor_quorum:\n        use: notifying_service\n        hostgroup_name: prometheus-hosts\n        service_description: \"CEPH_quorum\"\n        check_command: check_prom_alert!ceph_monitor_quorum_low!CRITICAL- ceph monitor quorum does not exist!OK- ceph monitor quorum exists\n        check_interval: 60\n\nAn example of a check that targets all hosts for a base-os type check (memory\nusage, latency, etc) would be:\n\n::\n\n    - check_memory_usage:\n        use: notifying_service\n        service_description: Memory_usage\n        check_command: check_memory_usage\n        hostgroup_name: base-os\n\nThese two host groups allow for a wide range of targeted checks for determining\nthe status of all components of an OpenStack-Helm deployment.\n\nNagios Command Configuration\n----------------------------\n\nThe Nagios chart includes configuration values for the command definitions Nagios\nwill use when executing service checks. 
These values are found under the\nfollowing key:\n\n::\n\n    conf:\n      nagios:\n        commands:\n          - send_service_snmp_trap:\n              command_name: send_service_snmp_trap\n              command_line: \"$USER1$/send_service_trap.sh '$USER8$' '$HOSTNAME$' '$SERVICEDESC$' $SERVICESTATEID$ '$SERVICEOUTPUT$' '$USER4$' '$USER5$'\"\n          - send_host_snmp_trap:\n              command_name: send_host_snmp_trap\n              command_line: \"$USER1$/send_host_trap.sh '$USER8$' '$HOSTNAME$' $HOSTSTATEID$ '$HOSTOUTPUT$' '$USER4$' '$USER5$'\"\n          - send_service_http_post:\n              command_name: send_service_http_post\n              command_line: \"$USER1$/send_http_post_event.py --type service --hostname '$HOSTNAME$' --servicedesc '$SERVICEDESC$' --state_id $SERVICESTATEID$ --output '$SERVICEOUTPUT$' --monitoring_hostname '$HOSTNAME$' --primary_url '$USER6$' --secondary_url '$USER7$'\"\n          - send_host_http_post:\n              command_name: send_host_http_post\n              command_line: \"$USER1$/send_http_post_event.py --type host --hostname '$HOSTNAME$' --state_id $HOSTSTATEID$ --output '$HOSTOUTPUT$' --monitoring_hostname '$HOSTNAME$' --primary_url '$USER6$' --secondary_url '$USER7$'\"\n          - check_prometheus_host_alive:\n              command_name: check-prometheus-host-alive\n              command_line: \"$USER1$/check_rest_get_api.py --url $USER2$ --warning_response_seconds 5 --critical_response_seconds 10\"\n\nThe list of defined commands can be modified with configuration overrides, which\nallows for the ability define commands specific to an infrastructure deployment.\nThese commands can include querying Prometheus for metrics on dependencies for a\nservice to determine whether an alert should be raised, executing checks on each\nhost to determine network latency or file system usage, or checking each node\nfor issues with ntp clock skew.\n\nNote: Since the conf.nagios.commands key contains a list of the defined 
commands,\nthe entire contents of conf.nagios.commands will need to be overridden if\nadditional commands are desired (due to the immutable nature of lists).\n\nNagios Service Check Configuration\n----------------------------------\n\nThe Nagios chart includes configuration values for the service checks Nagios\nwill execute.  These service check commands can be found under the following\nkey:\n\n::\n    conf:\n      nagios:\n        services:\n          - notifying_service:\n              name: notifying_service\n              use: generic-service\n              flap_detection_enabled: 0\n              process_perf_data: 0\n              contact_groups: snmp_and_http_notifying_contact_group\n              check_interval: 60\n              notification_interval: 120\n              retry_interval: 30\n              register: 0\n          - check_ceph_health:\n              use: notifying_service\n              hostgroup_name: base-os\n              service_description: \"CEPH_health\"\n              check_command: check_ceph_health\n              check_interval: 300\n          - check_hosts_health:\n              use: generic-service\n              hostgroup_name: prometheus-hosts\n              service_description: \"Nodes_health\"\n              check_command: check_prom_alert!K8SNodesNotReady!CRITICAL- One or more nodes are not ready.\n              check_interval: 60\n          - check_prometheus_replicas:\n              use: notifying_service\n              hostgroup_name: prometheus-hosts\n              service_description: \"Prometheus_replica-count\"\n              check_command: check_prom_alert_with_labels!replicas_unavailable_statefulset!statefulset=\"prometheus\"!statefulset {statefulset} has lesser than configured replicas\n              check_interval: 60\n\nThe Nagios service configurations define the checks Nagios will perform.  
These\nchecks contain keys for defining: the service type to use, the host group to\ntarget, the description of the service check, the command the check should use,\nand the interval at which to trigger the service check.  These services can also\nbe extended to provide additional insight into the overall status of a\nparticular service.  These services also allow the ability to define advanced\nchecks for determining the overall health and liveness of a service.  For\nexample, a service check could trigger an alarm for the OpenStack services when\nNagios detects that the relevant database and message queue has become\nunresponsive.\n"
  },
  {
    "path": "doc/source/monitoring/prometheus.rst",
    "content": "Prometheus\n==========\n\nThe Prometheus chart in openstack-helm provides a time series database and\na strong querying language for monitoring various components of OpenStack-Helm.\nPrometheus gathers metrics by scraping defined service endpoints or pods at\nspecified intervals and indexing them in the underlying time series database.\n\nAuthentication\n--------------\n\nThe Prometheus deployment includes a sidecar container that runs an Apache\nreverse proxy to add authentication capabilities for Prometheus.  The\nusername and password are configured under the monitoring entry in the endpoints\nsection of the chart's values.yaml.\n\nThe configuration for Apache can be found under the conf.httpd key, and uses a\nhelm-toolkit function that allows for including gotpl entries in the template\ndirectly.  This allows the use of other templates, like the endpoint lookup\nfunction templates, directly in the configuration for Apache.\n\nPrometheus Service configuration\n--------------------------------\n\nThe Prometheus service is configured via command line flags set during runtime.\nThese flags include: setting the configuration file, setting log levels, setting\ncharacteristics of the time series database, and enabling the web admin API for\nsnapshot support.  
These settings can be configured via the values tree at:\n\n::\n\n    conf:\n      prometheus:\n        command_line_flags:\n          log.level: info\n          query.max_concurrency: 20\n          query.timeout: 2m\n          storage.tsdb.path: /var/lib/prometheus/data\n          storage.tsdb.retention: 7d\n          web.enable_admin_api: false\n          web.enable_lifecycle: false\n\nThe Prometheus configuration file contains the definitions for scrape targets\nand the location of the rules files for triggering alerts on scraped metrics.\nThe configuration file is defined in the values file, and can be found at:\n\n::\n\n    conf:\n      prometheus:\n        scrape_configs: |\n\nBy defining the configuration via the values file, an operator can override all\nconfiguration components of the Prometheus deployment at runtime.\n\nKubernetes Endpoint Configuration\n---------------------------------\n\nThe Prometheus chart in openstack-helm uses the built-in service discovery\nmechanisms for Kubernetes endpoints and pods to automatically configure scrape\ntargets.  Functions added to helm-toolkit allows configuration of these targets\nvia annotations that can be applied to any service or pod that exposes metrics\nfor Prometheus, whether a service for an application-specific exporter or an\napplication that provides a metrics endpoint via its service. The values in\nthese functions correspond to entries in the monitoring tree under the\nprometheus key in a chart's values.yaml file.\n\n\nThe functions definitions are below:\n\n::\n\n    {{- define \"helm-toolkit.snippets.prometheus_service_annotations\" -}}\n    {{- $config := index . 
0 -}}\n    {{- if $config.scrape }}\n    prometheus.io/scrape: {{ $config.scrape | quote }}\n    {{- end }}\n    {{- if $config.scheme }}\n    prometheus.io/scheme: {{ $config.scheme | quote }}\n    {{- end }}\n    {{- if $config.path }}\n    prometheus.io/path: {{ $config.path | quote }}\n    {{- end }}\n    {{- if $config.port }}\n    prometheus.io/port: {{ $config.port | quote }}\n    {{- end }}\n    {{- end -}}\n\n::\n\n    {{- define \"helm-toolkit.snippets.prometheus_pod_annotations\" -}}\n    {{- $config := index . 0 -}}\n    {{- if $config.scrape }}\n    prometheus.io/scrape: {{ $config.scrape | quote }}\n    {{- end }}\n    {{- if $config.path }}\n    prometheus.io/path: {{ $config.path | quote }}\n    {{- end }}\n    {{- if $config.port }}\n    prometheus.io/port: {{ $config.port | quote }}\n    {{- end }}\n    {{- end -}}\n\nThese functions render the following annotations:\n\n- prometheus.io/scrape:  Must be set to true for Prometheus to scrape target\n- prometheus.io/scheme:  Overrides scheme used to scrape target if not http\n- prometheus.io/path:    Overrides path used to scrape target metrics if not /metrics\n- prometheus.io/port:    Overrides port to scrape metrics on if not service's default port\n\nEach chart that can be targeted for monitoring by Prometheus has a prometheus\nsection under a monitoring tree in the chart's values.yaml, and Prometheus\nmonitoring is disabled by default for those services.  Example values for the\nrequired entries can be found in the following monitoring configuration for the\nprometheus-node-exporter chart:\n\n::\n\n    monitoring:\n      prometheus:\n        enabled: false\n        node_exporter:\n          scrape: true\n\nIf the prometheus.enabled key is set to true, the annotations are set on the\ntargeted service or pod as the condition for applying the annotations evaluates\nto true.  
For example:\n\n::\n\n    {{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.node_exporter }}\n    ---\n    apiVersion: v1\n    kind: Service\n    metadata:\n    name: {{ tuple \"node_metrics\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n    labels:\n    {{ tuple $envAll \"node_exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n    annotations:\n    {{- if .Values.monitoring.prometheus.enabled }}\n    {{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n    {{- end }}\n\nKubelet, API Server, and cAdvisor\n---------------------------------\n\nThe Prometheus chart includes scrape target configurations for the kubelet, the\nKubernetes API servers, and cAdvisor.  These targets are configured based on\na kubeadm deployed Kubernetes cluster, as OpenStack-Helm uses kubeadm to deploy\nKubernetes in the gates.  These configurations may need to change based on your\nchosen method of deployment.  Please note the cAdvisor metrics will not be\ncaptured if the kubelet was started with the following flag:\n\n::\n\n    --cadvisor-port=0\n\nTo enable the gathering of the kubelet's custom metrics, the following flag must\nbe set:\n\n::\n\n    --enable-custom-metrics\n\nInstallation\n------------\n\nThe Prometheus chart can be installed with the following command:\n\n.. code-block:: bash\n\n    helm install --namespace=openstack local/prometheus --name=prometheus\n\nThe above command results in a Prometheus deployment configured to automatically\ndiscover services with the necessary annotations for scraping, configured to\ngather metrics on the kubelet, the Kubernetes API servers, and cAdvisor.\n\nExtending Prometheus\n--------------------\n\nPrometheus can target various exporters to gather metrics related to specific\napplications to extend visibility into an OpenStack-Helm deployment.  
Currently,\nopenstack-helm contains charts for:\n\n- prometheus-kube-state-metrics: Provides additional Kubernetes metrics\n- prometheus-node-exporter: Provides metrics for nodes and linux kernels\n- prometheus-openstack-metrics-exporter: Provides metrics for OpenStack services\n\nKube-State-Metrics\n~~~~~~~~~~~~~~~~~~\n\nThe prometheus-kube-state-metrics chart provides metrics for Kubernetes objects\nas well as metrics for kube-scheduler and kube-controller-manager.  Information\non the specific metrics available via the kube-state-metrics service can be\nfound in the kube-state-metrics_ documentation.\n\nThe prometheus-kube-state-metrics chart can be installed with the following:\n\n.. code-block:: bash\n\n    helm install --namespace=kube-system local/prometheus-kube-state-metrics --name=prometheus-kube-state-metrics\n\n.. _kube-state-metrics: https://github.com/kubernetes/kube-state-metrics/tree/master/Documentation\n\nNode Exporter\n~~~~~~~~~~~~~\n\nThe prometheus-node-exporter chart provides hardware and operating system metrics\nexposed via Linux kernels.  Information on the specific metrics available via\nthe Node exporter can be found on the Node_exporter_ GitHub page.\n\nThe prometheus-node-exporter chart can be installed with the following:\n\n.. code-block:: bash\n\n    helm install --namespace=kube-system local/prometheus-node-exporter --name=prometheus-node-exporter\n\n.. _Node_exporter: https://github.com/prometheus/node_exporter\n\nOpenStack Exporter\n~~~~~~~~~~~~~~~~~~\n\nThe prometheus-openstack-exporter chart provides metrics specific to the\nOpenStack services.  The exporter's source code can be found here_. While the\nmetrics provided are by no means comprehensive, they will be expanded upon.\n\nPlease note the OpenStack exporter requires the creation of a Keystone user to\nsuccessfully gather metrics.  
To create the required user, the chart uses the\nsame keystone user management job the OpenStack service charts use.\n\nThe prometheus-openstack-exporter chart can be installed with the following:\n\n.. code-block:: bash\n\n    helm install --namespace=openstack local/prometheus-openstack-exporter --name=prometheus-openstack-exporter\n\n.. _here: https://github.com/att-comdev/openstack-metrics-collector\n\nOther exporters\n~~~~~~~~~~~~~~~\n\nCertain charts in OpenStack-Helm include templates for application-specific\nPrometheus exporters, which keeps the monitoring of those services tightly coupled\nto the chart.  The templates for these exporters can be found in the monitoring\nsubdirectory in the chart.  These exporters are disabled by default, and can be\nenabled by setting the appropriate flag in the monitoring.prometheus key of the\nchart's values.yaml file.  The charts containing exporters include:\n\n- Elasticsearch_\n- RabbitMQ_\n- MariaDB_\n- Memcached_\n- Fluentd_\n- Postgres_\n\n.. _Elasticsearch: https://github.com/prometheus-community/elasticsearch_exporter\n.. _RabbitMQ: https://github.com/kbudde/rabbitmq_exporter\n.. _MariaDB: https://github.com/prometheus/mysqld_exporter\n.. _Memcached: https://github.com/prometheus/memcached_exporter\n.. _Fluentd: https://github.com/V3ckt0r/fluentd_exporter\n.. _Postgres: https://github.com/wrouesnel/postgres_exporter\n\nCeph\n~~~~\n\nStarting with Luminous, Ceph can export metrics with ceph-mgr prometheus module.\nThis module can be enabled in Ceph's values.yaml under the ceph_mgr_enabled_plugins\nkey by appending prometheus to the list of enabled modules.  After enabling the\nprometheus module, metrics can be scraped on the ceph-mgr service endpoint.  This\nrelies on the Prometheus annotations attached to the ceph-mgr service template, and\nthese annotations can be modified in the endpoints section of Ceph's values.yaml\nfile.  
Information on the specific metrics available via the prometheus module\ncan be found in the Ceph prometheus_ module documentation.\n\n.. _prometheus: http://docs.ceph.com/docs/master/mgr/prometheus/\n\n\nPrometheus Dashboard\n--------------------\n\nPrometheus includes a dashboard that can be accessed via the accessible\nPrometheus endpoint (NodePort or otherwise).  This dashboard will give you a\nview of your scrape targets' state, the configuration values for Prometheus's\nscrape jobs and command line flags, a view of any alerts triggered based on the\ndefined rules, and a means for using PromQL to query scraped metrics.  The\nPrometheus dashboard is a useful tool for verifying Prometheus is configured\nappropriately and to verify the status of any services targeted for scraping via\nthe Prometheus service discovery annotations.\n\nRules Configuration\n-------------------\n\nPrometheus provides a querying language that can operate on defined rules which\nallow for the generation of alerts on specific metrics.  The Prometheus chart in\nopenstack-helm defines these rules via the values.yaml file.  By defining\nthese in the values file, it allows operators flexibility to provide specific\nrules via overrides at installation.  The following rules keys are provided:\n\n::\n\n    values:\n      conf:\n        rules:\n          alertmanager:\n          etcd3:\n          kube_apiserver:\n          kube_controller_manager:\n          kubelet:\n          kubernetes:\n          rabbitmq:\n          mysql:\n          ceph:\n          openstack:\n          custom:\n\nThese provided keys provide recording and alert rules for all infrastructure\ncomponents of an OpenStack-Helm deployment.  If you wish to exclude rules for a\ncomponent, leave the tree empty in an overrides file.  To read more\nabout Prometheus recording and alert rules definitions, please see the official\nPrometheus recording_ and alert_ rules documentation.\n\n.. 
_recording: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/\n.. _alert: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/\n\nNote: Prometheus releases prior to 2.0 used gotpl to define rules.  Prometheus\n2.0 changed the rules format to YAML, making them much easier to read.  The\nPrometheus chart in openstack-helm uses Prometheus 2.0 by default to take\nadvantage of changes to the underlying storage layer and the handling of stale\ndata.  The chart will not support overrides for Prometheus versions below 2.0,\nas the command line flags for the service changed between versions.\n\nThe wide range of exporters included in OpenStack-Helm coupled with the ability\nto define rules with configuration overrides allows for the addition of custom\nalerting and recording rules to fit an operator's monitoring needs.  Adding new\nrules or modifying existing rules require overrides for either an existing key\nunder conf.rules or the addition of a new key under conf.rules.  The addition\nof custom rules can be used to define complex checks that can be extended for\ndetermining the liveliness or health of infrastructure components.\n"
  },
  {
    "path": "doc/source/readme.rst",
    "content": ".. include:: ../../README.rst\n"
  },
  {
    "path": "doc/source/specs/2025.1/chart_versioning.rst",
    "content": "=================\nCharts versioning\n=================\n\nProblem Description\n===================\n\nThere are issues:\n\n* All Openstack-Helm charts depend on the helm-toolkit subchart, but\n  the helm-toolkit version is not pinned. When helm-toolkit is updated,\n  we don't bump the version of the charts that depend on it and re-publish\n  them. This can change the behavior of the charts while the version of the\n  chart tarball remains unchanged.\n* We use `chart-testing`_ to lint the charts. The chart-testing tool\n  requires that the chart version is bumped every time any file in the\n  chart directory is changed. In every chart, we have a ``values_overrides``\n  directory where we store the version-specific overrides as well as\n  example overrides for some specific configurations. These overrides are\n  not part of the chart tarball, but when they are changed, we bump the\n  chart version.\n* We use ``apiVersion: v1`` in ``Chart.yaml``, and dependencies are stored in a\n  separate ``requirements.yaml`` file. However, ``apiVersion: v2`` allows defining\n  dependencies directly in the ``Chart.yaml`` file.\n* We track the release notes in a separate directory and we don't have a\n  CHANGELOG.md file in chart tarballs.\n* Chart maintainers are assumed to update the same release notes file\n  when they change the same chart in two separate commits. 
This leads to\n  merge conflicts that are now resolved manually which is inconvenient.\n  This is because we are misusing the Reno tool which is designed to\n  avoid merge conflicts in the release notes.\n* All the chart versions are independent of each other and do not follow the\n  Openstack release versioning which makes it difficult for users to understand\n  which version of the chart corresponds to which Openstack release.\n\nProposed Change\n===============\n\nWe propose to do the following:\n\n* Move values overrides to a separate directory.\n* Use ``apiVersion: v2`` in ``Chart.yaml``.\n* Move release notes to the CHANGELOG.md files.\n* Once the Openstack is released we will bump the version of all charts to\n  this new release, for example ``2025.1.0``.\n  Semver assumes the following:\n\n    * MAJOR version when you make incompatible API changes\n    * MINOR version when you add functionality in a backward compatible manner\n    * PATCH version when you make backward compatible bug fixes\n\n  However, we will not strictly follow these assumptions. We will still\n  follow the policy that the last version of any chart must\n  be compatible with all currently maintained versions of Openstack\n  (usually 3 most recent versions). All the changes between Openstack\n  releases will be backward compatible.\n\n  We will not bump the chart version in the git repo when we update chart.\n  Instead, we will increment the PATCH automatically when building the tarball.\n  The PATCH will be calculated as the number of commits related to a given\n  chart after the latest git tag.\n  So for example if the latest tag is ``2024.2.0`` and we have 3 commits\n  in the nova chart after this tag, the version of the nova tarball will be\n  ``2024.2.3``.\n\n  All the tarballs will be published with the build metadata showing\n  the commit SHA sum with which the tarball is built. 
The tarball\n  version will look like ``2025.1.X+<osh_commit_sha>_<osh_infra_commit_sha>``.\n\nImplementation\n==============\n\nAssignee(s)\n-----------\n\nPrimary assignees:\n  kozhukalov (Vladimir Kozhukalov <kozhukalov@gmail.com>)\n\nWork Items\n----------\n\nThe following work items need to be completed for this specification to be\nimplemented.\n\nValues overrides\n~~~~~~~~~~~~~~~~\nMove values_overrides from all charts to a separate directory ``values``\nwith the hierarchy ``values_overrides/<chart-name>/<feature1>_<feature2>.yaml``.\nThe Openstack-Helm plugin is able to lookup the overrides in an arbitrary directory,\nbut the directory structure must be as described above.\n\nUpdate the version of all charts to ``2024.2.0``\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nAll the charts must be updated to the version ``2024.2.0`` in a single commit.\nWhile developing the charts we will not change the version of the charts in\ntheir Chart.yaml files in the git repo. So, in the git repos the versions\nof all charts will be the same, e.g. ``2024.2.0``. It will be changed\ntwice a year when the Openstack is released and the version update\ncommit will be tagged appropriately.\n\nHowever when we build a chart the tarball version will be updated every time.\nThe tarball version will be calculated automatically\n``2024.2.X+<osh_commit_sha>_<osh_infra_commit_sha>`` where ``X`` is the number\nof commits related to the chart after the latest tag.\n\n.. code-block:: bash\n\n    $ PATCH=$(git log --oneline <tag>.. <chart_directory> | wc -l)\n    $ OSH_COMMIT_SHA=$(cd ${SRC}/openstack-helm; git rev-parse --short HEAD)\n    $ OSH_INFRA_COMMIT_SHA=$(cd ${SRC}/openstack-helm-infra; git rev-parse --short HEAD)\n    $ helm package <chart> --version 2024.2.${PATCH}+${OSH_COMMIT_SHA}_${OSH_INFRA_COMMIT_SHA}\n\n.. 
note::\n    When the chart itself is not changed but is re-built with the new version\n    of the helm-toolkit, the PATCH will not be changed and the tarball will\n    be published with the same version but with the new build metadata (``${OSH_INFRA_COMMIT_SHA}``).\n\nSet git tag for the Openstack-Helm repositories\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nWe will set the git tag ``2024.2.0`` for all the Openstack-Helm repositories.\nThese tags are set by means of submitting a patch to the openstack/releases\nrepository. Since that we will set such tag twice a year when the Openstack\nis released.\n\nUpdate ``apiVersion`` in ``Chart.yaml``\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nUpdate ``apiVersion`` to ``v2`` in all ``Chart.yaml`` files and\nmigrate the dependecies (helm-toolkit) from ``requirements.yaml``\nto ``Chart.yaml``.\n\nReorganize the process of managing release notes\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nThe Reno tool (a Python package used for managing release notes) can be used\nin a way that allows to avoid merge conflicts for PRs that update the same chart.\nIt generates the release notes report using the git history.\n\nWe suggest the following workflow:\n\n* When a chart is updated, the maintainer runs the ``reno new <chart>`` command to create\n  a new release note file ``releasenotes/notes/<chart>-<hash>.yaml``.\n* The maintainer fills in the new release note file with the necessary information.\n* The maintainer commits the release note file.\n* While building the tarball we will use ``reno report`` command with a custom script\n  to generate the release notes report and automatically prepare\n  the ``<chart>/CHANGELOG.md`` file.\n\nSince we are not going to bump the chart version when we update it, all the\nrelease notes will be bound to some git commits and we be put under the headers\nthat correspond to git tags.\n\nThe format of the ``CHANGELOG.md`` file:\n\n.. 
code-block:: markdown\n\n    ## X.Y.Z-<num_commits_after_X.Y.Z>\n\n    - Some new update\n\n    ## X.Y.Z\n\n    - Some update\n    - Previous update\n\nWhere ``X.Y.Z`` is the tag in the git repository and the ``X.Y.Z`` section contains\nall the release notes made before the tag was set. The ``X.Y.Z-<num_commits_after_X.Y.Z>``\nsection contains all the release notes made after the tag was set.\n\nAt this point we have the only tag ``0.1.0``. So, when we set the ``2024.2.0`` tag almost all\nthe release notes will go to this tag and the ``CHANGELOG.md`` file. So it will look like:\n\n.. code-block:: markdown\n\n    ## 2024.2.0-<num_commits_after_2024.2.0>\n\n    - Some new update\n\n    ## 2024.2.0\n\n    - Some update\n    - Previous update\n\nUpdate the versioning policy\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n* When the helm-toolkit chart is updated and tested with all other charts,\n  we will re-build it and publish with the new version according to how it is\n  described above.\n  All other charts also will be re-built with this new version of\n  helm-toolkit (inside) and published with the new build metadata (new ``$OSH_INFRA_COMMIT_SHA``).\n  Helm-toolkit version will not be pinned in the charts.\n* When a particular chart is changed, we will re-build and publish only this chart.\n  So all charts will be built and published independently of each other.\n  All the test jobs must be able to use updated chart from the PR with other\n  charts taken from the public helm repository (tarballs).\n\nAlternatively, we could pin the helm-toolkit version in the charts, but this would\nmake the maintenance of the charts more complicated.\n\nDocumentation Impact\n====================\n\nThe user documentation must be updated and it must be emphasized that the chart version\nis not equal to the Openstack release version and that the Openstack version is defined\nby the images used with the charts. 
Also it must be explained that a particular version\nlike ``2024.2.X`` is compatible with those Openstack releases that were maintained at the time\n``2024.2.X`` was built and published (i.e. ``2023.1``, ``2023.2``, ``2024.1``, ``2024.2``).\n\n.. _chart-testing: https://github.com/helm/chart-testing.git\n"
  },
  {
    "path": "doc/source/specs/2025.2/own_service_accounts.rst",
    "content": "====================\nOwn Service Accounts\n====================\n\nProblem Description\n===================\n\nCurrently when an OpenStack-Helm chart deploys a OpenStack service,\nit creates a service account that is used by other Openstack services\nto interact with the service's API. For example, the Nova\nchart creates a service account called ``nova`` and other charts\nlike Cinder and Neutron configure Cinder and Neutron services\nto use the ``nova`` service account to interact with the Nova API.\n\nHowever, there might be scenarios where multiple Nova accounts\nare necessary. For instance, if Neutron requires more permissive\naccess to the Nova API than Cinder, it might be desirable to create\ntwo separate accounts with tailored permissions.\n\nAlso the current approach assumes service accounts are owned by\nthe chart that creates them and it requires other charts to be\ndeployed with the credentials of the service account owner chart.\nI.e. the service account credentials must be synced between charts.\n\n\nProposed Change\n===============\n\nThe spec proposes to modify the `Keystone user manifest`_ so\nit is able to create multiple Keystone users. The job will be deployed with\nmultiple containers, each container will create a separate Keystone user.\n\nAll other Openstack charts use the `Keystone user manifest`_ for\nmanaging service accounts. So every Openstack chart will be able to create a bunch\nof service accounts according to their needs.\n\nE.g. 
the Neutron chart will create the following service accounts:\n\n* neutron (used by Neutron to communicate with the Keystone API to check auth tokens\n  and other services can use it to get access to the Neutron API)\n* neutron_nova (used by Neutron to get access to the Nova API instead\n  of using ``nova`` service account created by the Nova chart)\n* neutron_placement (used by Neutron to get access to the Placement API\n  instead of using ``placement`` service account managed by the Placement chart)\n\nThe proposed change is going to be backward compatible because the Neutron\nchart will still be able to use the ``neutron`` and ``placement`` service accounts\nmanaged by the Nova and Placement charts. Also the ``neutron`` service account\ncan still be used by other charts to communicate with the Neutron API.\n\nImplementation\n==============\n\nAssignee(s)\n-----------\n\nPrimary assignee:\n  kozhukalov (Vladimir Kozhukalov <kozhukalov@gmail.com>)\n\nValues\n------\n\nService accounts credentials are defined in the ``values.yaml`` files\nin the ``.Values.endpoints.identity.auth`` section. The section contains\na bunch of dicts defining credentials for every service account.\n\nCurrently those dicts which correspond to service accounts managed by other charts\nmust be aligned with those charts values. For example, the Neutron values must\ndefine the ``nova`` service account the same way as the Nova chart does.\n\nThe following is the example of how the ``.Values.endpoints.identity.auth``\nsection of a chart must be modified. The example is given for the Neutron chart:\n\n.. 
code-block:: yaml\n\n    endpoints:\n      identity:\n        auth:\n          # This serivce account is managed by Keystone chart\n          # and created during Keystone database sync.\n          # We should not modify it.\n          admin:\n            region_name: RegionOne\n            username: admin\n            password: password\n            project_name: admin\n            user_domain_name: default\n            project_domain_name: default\n          # Service account with the following username/password\n          # will be created by the Keystone user job\n          # and will be used for Neutron configuration.\n          # For backward compatibility this dict must not be modified\n          # to stay aligned with other charts that use this service account\n          # to get access to the Neutron API.\n          neutron:\n            role: admin,service\n            region_name: RegionOne\n            username: neutron\n            password: password\n            project_name: service\n            user_domain_name: service\n            project_domain_name: service\n          # Service account with the following username/password\n          # will be created by the Keystone user job\n          # and will be used for Neutron configuration. Also the\n          # ``role`` field must be added to assign necessary roles\n          # to the service account.\n          nova:\n            role: admin,service\n            region_name: RegionOne\n            project_name: service\n            username: neutron_nova\n            password: neutron_nova_password\n            user_domain_name: service\n            project_domain_name: service\n          # Service account with the following username/password\n          # will be created by the Keystone user job\n          # and will be used for Neutron configuration. 
Also the\n          # ``role`` field must be added to assign necessary roles\n          # to the service account.\n          placement:\n            role: admin,service\n            region_name: RegionOne\n            project_name: service\n            username: neutron_placement\n            password: neutron_placement_password\n            user_domain_name: service\n            project_domain_name: service\n\nSecrets\n-------\n\nThe service account credentials are stored in K8s secrets which are\nused by the `Keystone user manifest`_ to create the service accounts.\n\nSo the template that deploys those secrets must be updated to\ncreate the secrets for all service accounts defined in the\n``.Values.endpoints.identity.auth`` section.\n\nAlso the ``.Values.secrets.identity`` section must be updated and\nsecret names must be added for all service accounts defined in the\n``.Values.endpoints.identity.auth`` section.\n\nKeystone user manifest\n----------------------\n\nThe Helm-toolkit chart defines the `Keystone user manifest`_\nwhich is used by all Openstack charts to create service accounts.\nThe manifest must be updated to be able to accept ``serviceUsers`` parameter\nwhich will be the list of service accounts to be created by the job.\n\nFor backward compatibility if the ``serviceUsers`` parameter is not given\nthen the manifest will use the ``serviceUser`` parameter or ``serviceName`` parameter\nto define the ``serviceUsers`` as a list with a single element.\n\n.. code-block::\n\n    {{- $serviceName := index . \"serviceName\" -}}\n    {{- $singleServiceUser := index . \"serviceUser\" | default $serviceName -}}\n    {{- $serviceUsers := index . \"serviceUsers\" | default (tuple $singleServiceUser) -}}\n\nKeystone user job\n-----------------\n\nAll Openstack charts deploy the job to create service accounts which uses\nthe `Keystone user manifest`_. 
The modified manifest will be\nable to create multiple Keystone users and the job template must also be updated\nto pass the proper list of service accounts to the manifest.\n\nFor example, the Neutron chart will be modified to create the following\nservice accounts:\n\n.. code-block::\n\n    {{ dict \"envAll\" . \"serviceName\" \"neutron\" \"serviceUsers\" (tuple \"neutron\" \"nova\" \"placement\") | include \"helm-toolkit.manifests.job_ks_user\" }}\n\nWork Items\n----------\n\n* Modify the `Keystone user manifest`_ to make it possible\n  to create multiple Keystone users in a single job.\n* Modify charts one by one as described above so they create their own\n  service accounts to get access to the APIs of other OpenStack services.\n\nAlternatives\n------------\n\nA K8s operator can be used to manage service accounts. In this case\ncharts will deploy the custom resources that will be handled by the operator.\nThe operator could also be useful for more complex scenarios when users\ndeploy Keystone federation and need more flexibility in managing\nservice accounts.\n\nHowever, the proposed change is simpler to implement and it does not\nrequire any additional components while the change is backward compatible\nand does not break existing deployments.\n\nDocumentation Impact\n====================\nThe documentation must be updated to reflect the change.\n\n.. _Keystone user manifest: https://opendev.org/openstack/openstack-helm/src/branch/master/helm-toolkit/templates/manifests/_job-ks-user.yaml.tpl\n"
  },
  {
    "path": "doc/source/specs/COPYME",
    "content": "..\n This work is licensed under a Creative Commons Attribution 3.0 Unported\n License.\n\n http://creativecommons.org/licenses/by/3.0/legalcode\n\n..\n\n===============\nBlueprint Title\n===============\n\nInclude the URL of your Storyboard RFE:\n\nhttps://storyboard.openstack.org/#!/story/1234567\n\nProblem Description\n===================\n\nA detailed description of the problem.\n\nUse cases\n---------\n1. TODO\n\nProposed Change\n===============\n\nHow do you propose to solve this problem, and what's the scope?\n\nSecurity Impact\n---------------\nHow does this feature impact the security of OpenStack-Helm?\n\nPerformance Impact\n------------------\nDoes this feature impact the performance of OpenStack-Helm?\n\nAlternatives\n------------\nWhy is the proposed approach the best approach?\n\nImplementation\n==============\n\nAssignee(s)\n-----------\n\nWho is leading the implementation?\n\nDesignate the primary author and contact.\n\nPrimary assignee:\n  <Gerrit Id or None>\n\nWork Items\n----------\n\nWork items or tasks.  These can be worked on by multiple contributors.\n\nTesting\n=======\nWhat tests will verify this change is functional?\n\nDocumentation Impact\n====================\nWhat documentation needs must be considered with this change?\n\nReferences\n==========\nPlace any external references here.\n"
  },
  {
    "path": "doc/source/specs/developer-environment.rst",
    "content": "..\n This work is licensed under a Creative Commons Attribution 3.0 Unported\n License.\n\n http://creativecommons.org/licenses/by/3.0/legalcode\n\n..\n\n=====================\nDeveloper Environment\n=====================\n\nhttps://blueprints.launchpad.net/openstack-helm/+spec/developer-environment\n\nProblem Description\n===================\n\nDevelopers require a simple way of instantiating a working environment for\nOpenStack-Helm, that allows them to quickly begin development of the project.\nThis is more complex to achieve than many OpenStack Projects that can simply\nrely upon a devstack plugin to achieve this. This is as OpenStack-Helm is\nfocused on the deployment of OpenStack (and associated) Projects, rather than\nthe development of the projects themselves, and also requires additional\nsupporting infrastructure, e.g. Kubernetes and a CNI.\n\nUse cases\n---------\n1. Development of OpenStack-Helm\n2. PoC deployments of OpenStack-Helm\n\nProposed Change\n===============\n\nThe OpenStack-Helm Zuulv2 gates were written to allow use outside of\nOpenStack-Infra, to quickly set up a Kubernetes cluster, with the adoption of\nZuulv3 underway it is logical to extend this paradigm to the Zuulv3 Playbooks.\nThis will be driven via a ``Makefile`` that will allow developers to perform the\nfollowing actions:\n\n* Prepare Host(s) for OpenStack-Helm deployment\n* Deploy Kubernetes via KubeADM, with charts for CNI and DNS services\n\nAt this point, curated scripts will be used to deploy OpenStack-Helm services on\ndemand as desired, with documentation provided to allow a new developer to\nquickly set up either a single or multimode deployment of a reference\n`OpenStack Compute Kit <https://governance.openstack.org/tc/reference/tags/starter-kit_compute.html>`_\nenvironment with the addition of:\n\n* Ceph backed Object Storage\n* Ceph backed Block Storage (cinder)\n* Orchestration (heat)\n* Web UI (horizon)\n\nA set of scripts will be provided to exercise 
the deployed environment that\nchecks the basic functionality of the deployed cloud, driven where possible via\nOpenStack heat:\n\n* Create external network\n* Setup access to the external network from the development machine\n* Create tenant network\n* Create tenant router to link tenant network and external\n* Create SSH Key in nova\n* Create VM on tenant network\n* Assign Floating IP to VM\n* SSH into VM and check it can access the internet\n\nThis deployment process will be gated, to ensure that the development\nthe environment is consistently working against ``master`` for the\nOpenStack-Helm repositories.\n\nSecurity Impact\n---------------\nThere will be no security impact, as it will deploy the charts in\nOpenStack-Helm[-infra/-addons] upon a reference KubeADM administered cluster.\n\nPerformance Impact\n------------------\nThis feature will not affect the performance of OpenStack-Helm.\n\nAlternatives\n------------\nThe alternative would be to continue supporting the current bash driven\ncontainerized KubeADM and Kubelet approach, though this has the following\nissues:\n\n* The containerized Kubelet cannot survive a restart, as it does not setup\n  mounts correctly.\n* The bash scripts are largely undocumented and have grown to the point where\n  they are very hard for a new developer to work on.\n* The move to Zuulv3 native operation of the OpenStack-Helm gates mean there\n  would be no code reuse between the gate and developer environments, so\n  supporting the existing code for Zuulv2 will incur significant tech-debt.\n\nImplementation\n==============\n\nAssignee(s)\n-----------\n\nPrimary assignee:\n  portdirect (Pete Birley)\n\nWork Items\n----------\n\nThe following work items need to be completed for this Specification to be\nimplemented.\n\n* Update of Developer Documentation\n* Update of Makefile for OpenStack-Helm-Infra to allow modular deployment of\n  components\n* Develop scripts for bringing up OpenStack-Helm Charts and perform basic\n  
interactive tests\n* Add gate for developer environment\n\nTesting\n=======\nA gate will be added to OpenStack-Helm that runs through the developer\nenvironment deployment process.\n\nDocumentation Impact\n====================\nThe developer documentation in OpenStack-Helm should be updated to match the\ngated developer deploy process.\n"
  },
  {
    "path": "doc/source/specs/fluentbit-fluentd-architecture.rst",
    "content": "..\n This work is licensed under a Creative Commons Attribution 3.0 Unported\n License.\n\n http://creativecommons.org/licenses/by/3.0/legalcode\n..\n\n======================================\nfluentbit-fluentd logging architecture\n======================================\n\nBlueprints:\n1. osh-logging-framework_\n\n.. _osh-logging-framework: https://blueprints.launchpad.net/openstack-helm/+spec/osh-logging-framework\n\nRelated Specs:\n1. OSH logging monitoring and alerting: https://review.openstack.org/#/c/482687/\n\n\nProblem Description\n===================\n\nOpenStack-Helm defines a centralized logging mechanism to provide insight into\nthe state of the OpenStack services and infrastructure components as\nwell as underlying Kubernetes platform. Among the requirements for a logging\nplatform, where log data can come from and where log data need to be delivered\nare very variable. To support various logging scenarios, OpenStack-Helm should\nprovide a flexible mechanism to meet with certain operation needs. This spec\nproposes fast and lightweight log forwarder and full featured log aggregator\ncomplementing each other providing a flexible and reliable solution. Especially,\nFluentbit is proposed as a log forwarder and Fluentd is proposed as a main log\naggregator and processor.\n\nPlatform Requirements\n=====================\n\nLogging Requirements\n--------------------\n\nThe requirements for a logging collector/aggregator include:\n\n1. Log collection daemon runs on each node to forward logs to aggregator\n2. Log collection daemon should have a minimal server footprint\n3. Log aggregator deployment runs on a selected node as deployment\n4. Ability to apply custom metadata and uniform format to logs\n5. Log aggregator should have HA capability\n6. Log aggregator should have a flexible output capability to choose from\n7. Log aggregator is able to send data to Elasticsearch and Kafka\n8. 
Log aggregator should be scalable\n\nLogical Diagram\n---------------\n\n1. diagram link: https://github.com/sktelecom-oslab/docs/blob/master/images/fluentbit-fluentd-diagram.png\n\nUse Cases\n=========\n\nLogging Use Cases\n-----------------\n\nExample uses for centralized logging with Fluentbit and Fluentd include:\n\n1. Cover the following logging use cases https://review.openstack.org/#/c/482687/\n2. Collect logs from the node by Fluentbit\n3. Every Fluentbit sends logs to Fluentd with Kubernetes metadata attached\n4. Fluentd then attaches Kubernetes and/or OpenStack metadata\n5. Fluentd properly filters and categorizes logs\n6. Fluentd sends aggregated logs to Elasticsearch for the internal use cases\n7. Aggregator also sends aggregated logs to Kafka for external tools to consume\n\n\nProposed Change\n===============\n\nLogging\n-------\n\nFluentbit, Fluentd meet OpenStack-Helm's logging requirements for gathering,\naggregating, and delivering of logged events. Fluentbit runs as a daemonset on\neach node and mounts the /var/lib/docker/containers directory. The Docker\ncontainer runtime engine directs events posted to stdout and stderr to this\ndirectory on the host. Fluentbit then forwards the contents of that directory to\nFluentd. Fluentd runs as a deployment at the designated nodes and exposes a service\nfor Fluentbit to forward logs. Fluentd should then apply the Logstash format to\nthe logs. Fluentd can also write Kubernetes and OpenStack metadata to the logs.\nFluentd will then forward the results to Elasticsearch and optionally to Kafka.\nElasticsearch indexes the logs in a logstash-* index by default. Kafka stores\nthe logs in a 'logs' topic by default. Any external tool can then consume the\n'logs' topic.\n\nThe proposal includes the following:\n\n1. Helm chart for Fluentbit-Fluentd Combination\n\nThe above chart must include sensible configuration values to make the logging\nplatform usable by default. 
These include: proper input configurations for both\nFluentbit and Fluentd, proper output configurations for both Fluentbit and\nFluentd, proper metadata and formats applied to the logs via Fluentd.\n\n\nSecurity Impact\n---------------\n\nAll services running within the platform should be subject to the\nsecurity practices applied to the other OpenStack-Helm charts.\n\nPerformance Impact\n------------------\n\nTo minimize the performance impacts, the following should be considered:\n\n1. Sane defaults for log retention and rotation policies\n\nImplementation\n==============\n\nAssignee(s)\n-----------\n\nPrimary assignees:\n  sungil (Sungil Im)\n  jayahn (Jaesuk Ahn)\n\nWork Items\n----------\n\n1. Fluentbit-Fluentd logging chart\n\nAll charts should follow design approaches applied to all other OpenStack-Helm\ncharts, including the use of helm-toolkit.\n\nAll charts require valid and sensible default values to provide operational\nvalue out of the box.\n\nTesting\n=======\nTesting should include Helm tests for each of the included charts as well as an\nintegration test in the gate.\n\n\nDocumentation Impact\n====================\nDocumentation should be included for each of the included charts as well as\ndocumentation detailing the requirements for a usable monitoring platform,\npreferably with sane default values out of the box.\n"
  },
  {
    "path": "doc/source/specs/index.rst",
    "content": "Project Specifications\n======================\n\nSpecifications in this repository represent a consensus on the topics covered\nwithin.  They should be considered a mandate on the path forward with regards\nto the content on which they are drafted.\n\nHere is a list of the current specs:\n\n.. toctree::\n   :maxdepth: 1\n   :glob:\n\n   2025.1/*\n   2025.2/*\n   *\n\nSpecifications Purpose\n----------------------\n\nA specification should precede any broad-reaching technical changes or proposals\nto OpenStack-Helm.  Examples of changes requiring a specification include:  a\nstandard format to the values.yaml files, multiple backend support for neutron,\nand the approach for logging and monitoring in OpenStack-Helm.  Some additional\nfeatures will not need an accompanying specification, but may be tied back to an\nexisting specification.  An example of this would be introducing a service in\nOpenStack-Helm that could be included under the scope of a specification already\ndrafted and approved.\n\nSpecifications Process\n----------------------\n\nBefore drafting a specification, a blueprint should be filed in Storyboard_\nalong with any dependencies the blueprint requires.  Once the blueprint has been\nregistered, submit the specification as a patch set to the specs/ directory\nusing the supplied template.\n\nMore information about the blueprint + specification lifecycle can be found\nhere_.\n\n.. _Storyboard: https://storyboard.openstack.org/#!/project_group/64\n.. _here: https://wiki.openstack.org/wiki/Blueprints#Blueprints_and_Specs\n"
  },
  {
    "path": "doc/source/specs/multi-os.rst",
    "content": "..\n This work is licensed under a Creative Commons Attribution 3.0 Unported\n License.\n\n http://creativecommons.org/licenses/by/3.0/legalcode\n\n..\n\n================\nMulti-OS Support\n================\n\nInclude the URL of your Storyboard RFE:\n\nhttps://storyboard.openstack.org/#!/story/2005130\n\nProblem Description\n===================\n\nOur :ref:`images documentation` documentation claims to be independent\nof the image. However, some helm charts hard code paths of binaries,\nexecutables' runtime configurations, etc. Therefore, the image agnostic\npromise is broken.\n\nWe need to adapt all the helm charts to remove the hard-coded bits,\nbe image agnostic, to allow users to bring their own images.\n\nUse cases\n---------\n\nAllow the usage of multiple base images in OSH.\n\nProposed Change\n===============\n\nEdit all helm charts to remove possible references to image specific elements,\nreplacing them with values overrides or conditionals.\n\nIt is important to notice that the helm charts can be split in two categories\nfor now:\n\n#. Helm charts for which we use official upstream images.\n   (Called further ``Category A`` helm charts)\n#. Helm charts for which we are building images in osh-images.\n   (Called further ``Category B`` helm charts)\n\nFor the ``Category B`` helm charts, an informal testing has been done in the\npast to ensure image independence.\nHowever, there is nothing exercising this independence in gates. 
Due to that,\ncode of the helm charts might or might not require adapting.\n\nIn all cases, we will need to provide different ``profiles``\n(in other words, overrides), to test different image providers use cases in CI.\n\nThe ``profiles`` yaml files (for example ``centos_7``, ``opensuse_15``)\nwill be provided in each chart's ``example_values/`` directory.\nThis folder will be masked to helm through a helmignore file.\nIts content is only for user consumption, not for inclusion in helm charts\nthrough the File directive.\nIn other words, this is a user interface given for convenience merely using\nthe abilities of the existing helm charts.\n\nThe default ``values.yaml`` need to expose those abilities, by adding a new\nseries of keys/values to add the necessary features.\n\nThe existing schema for images is the following:\n\n.. code-block:: yaml\n\n   images:\n     tags:\n       imagename1: imagelocation:version-distro\n       imagename2: imagelocation:version-distro\n     pull_policy:\n     local_registry:\n\nFor this spec, we assume ``imagename1`` and ``imagename2`` are similarly built.\nThis means we do not require any override per image. Instead we require a\ngeneric kind of override, per application, usable by all charts.\n\nI propose to extend the conf schema with generic software information.\nFor example, for apache2:\n\n.. 
code-block:: yaml\n\n   conf:\n     software:\n       apache2:\n         #the apache2 binary location\n         binary: apache2\n         start_args: -DFOREGROUND\n         stop_args: -k graceful-stop\n         #directory where to drop the config files for apache vhosts\n         conf_dir: /etc/apache2/conf-enabled\n         sites_dir: /etc/apache2/sites-enabled\n\n\nWhen possible, ``values_overrides/`` will refer to existing\n``helm-toolkit`` functions to avoid repeating ourselves.\n\nThis approach:\n\n* Proposes a common approach to software configuration, describing the\n  distro/image specific differences for applications.\n* Exposes security/configuration features of software, allowing deployers to\n  configure software to their needs.\n* Allows different profiles of apache, should some charts require different\n  args for example for the same kind of images, by using yaml dict merges\n  features.\n\nSecurity Impact\n---------------\n\nNo direct impact, as there is no change in the current software/configuration\nlocation, merely a templating change.\n\nPerformance Impact\n------------------\n\nNo benchmark was done to evaluate:\n\n* the impact of exposing extra key/values in the helm chart ``values.yaml``\n  file (could possibly have a deployment speed/ram usage increase).\n* the impact of adding functionality in the ``helm-toolkit`` to deal with a\n  common multi-distro aspect (could possibly increase helm chart rendering time)\n* the impact of adding extra conditionals in the helm charts, to deal with\n  multi-distro aspect (if not using the approach above, or when using an\n  alternative approach)\n\nThe performance aspect of these point are restricted to deployment, and have\nno performance impact on operations.\n\nAlternatives\n------------\n\n* Not providing a support of multiple images. This leads to ease of\n  maintainance and reduced gate impact, with the risk of having\n  less contributors. 
For available overrides, users would have to provide\n  many overrides themselves, while this spec proposes a common community\n  approach.\n\n* Create conditionals in the helm charts. This is problematic, as the amount\n  of conditionals will increase and will be harder to maintain.\n  Overrides files are easy to sync between charts.\n\n* Only provide one way to configure software, and expect to always have the\n  same versions. This is further away from the \"image independent\" contract,\n  with extra burden: We will need to maintain a curated list of versions,\n  deal with the differences of the defaults (selinux/apparmor profiles come to\n  mind as path sensitive for example), and different expectations for\n  operational teams (\"The code is not where I expect it to be in the image\").\n  Embracing difference could even allow deployers to have different\n  expectations for images, for example: apache+mod_wsgi vs uwsgi standalone\n  or uwsgi + nginx.\n\n\nImplementation\n==============\n\nAssignee(s)\n-----------\n\nPrimary assignee:\n  - evrardjp\n\nWork Items\n----------\n\nThis spec will be worked helm chart by helm chart, starting with keystone.\n\nA few areas have been identified on changes required.\nEach of them will be a work item.\n\n#.  Make webserver binary path/arguments templated using ``values.yaml``.\n    As expressed above, this allows us to provide different overrides per\n    image/distribution to automatically wire things.\n#.  Dynamically alter webserver environment conditionally in the helm chart.\n    For example, for apache, ensure the necessary modules to run openstack\n    are available and loaded at helm chart deploy time. 
This will leverage\n    the binaries listed in ``values.yaml``.\n    These series of commands are distribution/image dependent,\n    as commands to list modules to load might differ.\n    However with a few parameters, we can get a very distro independent\n    process which would allow us to load all the required apache modules.\n#.  Alter webserver configuration per distro. Different distros have different\n    expectations in terms of path (including a different series of files\n    required), and it would make the operators' life easier by using their\n    expected distro paths.\n\nTesting\n=======\n\nNo change in testing is required, *per se*.\nIt is expected the new software configuration would be tested with the\ncurrent practices.\n\nOn top of that, the newly provided ``example_values/`` must\naim for being tested **as soon as possible upon delivery**. Without tests,\nthose examples will decrepit. The changes in CI pipelines for making use\nof ``example_values`` is outside the scope of this spec.\n\nDocumentation Impact\n====================\n\nNone more than this spec, as it should be relatively transparent for the\nuser. However, extra documentation to explain the usage of ``value_overrides``\nwould be welcomed.\n\nReferences\n==========\n\nNone\n"
  },
  {
    "path": "doc/source/specs/neutron-multiple-sdns.rst",
    "content": "..\n This work is licensed under a Creative Commons Attribution 3.0 Unported\n License.\n\n http://creativecommons.org/licenses/by/3.0/legalcode\n\n..\n\n=====================\nNeutron multiple SDNs\n=====================\n\nBlueprint:\nneutron-multiple-sdns_\n\n.. _neutron-multiple-sdns: https://blueprints.launchpad.net/openstack-helm/+spec/neutron-multiple-sdns\n\nProblem Description\n===================\n\nCurrently OpenStack-Helm supports OpenVSwitch as a network virtualization engine.\nIn order to support many possible backends (SDNs), changes are required in\nneutron chart and in deployment techniques. OpenStack-Helm can support every SDN\nsolution that has Neutron plugin, either core_plugin or mechanism_driver.\n\nThe Neutron reference architecture provides mechanism_drivers OpenVSwitch (OVS)\nand linuxbridge (LB) with ML2 core_plugin framework.\n\nOther networking services provided by Neutron are:\n\n#. L3 routing - creation of routers\n#. DHCP - auto-assign IP address and DNS info\n#. Metadata- Provide proxy for Nova metadata service\n\nIntroducing a new SDN solution should consider how the above services are\nprovided. It may be required to disable the built-in Neutron functionality.\n\nProposed Change\n===============\n\nTo be able to install Neutron with multiple possible SDNs as networking plugin,\nNeutron chart should be modified to enable installation of base services\nwith decomposable approach. This means that operator can define which components\nfrom base Neutron chart should be installed, and which should not. This plus\nproper configuration of Neutron chart would enable operator to flexibly provision\nOpenStack with chosen SDN.\n\nEvery Kubernetes manifest inside Neutron chart can be enabled or disabled.\nThat would provide flexibility to the operator, to choose which Neutron\ncomponents are reusable with different type of SDNs. 
For example, neutron-server\nwhich is serving the API and configuring the database can be used with different\ntypes of SDN plugin, and provider of that SDN chart would not need to copy\nall logic from Neutron base chart to manage the API and database.\n\nThe proposed change would be to add in :code:`neutron/values.yaml` new section\nwith boolean values describing which Neutron's Kubernetes resources should be\nenabled:\n\n.. code-block:: yaml\n\n    manifests:\n      configmap_bin: true\n      configmap_etc: true\n      daemonset_dhcp_agent: true\n      daemonset_l3_agent: true\n      daemonset_metadata_agent: true\n      daemonset_ovs_agent: true\n      daemonset_ovs_db: true\n      daemonset_ovs_vswitchd: true\n      deployment_server: true\n      deployment_rpc_server: true\n      ingress_server: true\n      job_db_init: true\n      job_db_sync: true\n      job_ks_endpoints: true\n      job_ks_service: true\n      job_ks_user: true\n      pdb_server: true\n      secret_db: true\n      secret_keystone: true\n      service_ingress_server: true\n      service_server: true\n\nThen, inside Kubernetes manifests, add global if statement, deciding if given\nmanifest should be declared on Kubernetes API, for example\n:code:`neutron/templates/daemonset-ovs-agent.yaml`:\n\n.. code-block:: yaml\n\n    {{- if .Values.manifests.daemonset_ovs_agent }}\n    # Licensed under the Apache License, Version 2.0 (the \"License\");\n\n    ...\n\n            - name: libmodules\n          hostPath:\n            path: /lib/modules\n        - name: run\n          hostPath:\n            path: /run\n    {{- end }}\n\nIf :code:`.Values.manifests.daemonset_ovs_agent` is set to false, neutron\novs agent would not be launched. 
In that matter, other type of L2 or L3 agent\non compute node can be run.\n\nTo enable new SDN solution, there should be separate chart created, which would\nhandle the deployment of service, setting up the database and any related\nnetworking functionality that SDN is providing.\n\nUse case\n--------\n\nLet's consider how new SDN can take advantage of disaggregated Neutron services\narchitecture. First assumption is that neutron-server functionality would be\ncommon for all SDNs, as it provides networking API, database management and\nKeystone interaction. Required modifications are:\n\n#. Configuration in :code:`neutron.conf` and :code:`ml2_conf.ini`\n#. Providing the neutron plugin code.\n\nThe code can be supplied as modified neutron server image, or plugin can be\nmounted to original image. The :code:`manifests` section in :code:`neutron/values.yaml`\nshould be enabled for below components:\n\n.. code-block:: yaml\n\n    manifests:\n      # neutron-server components:\n      configmap_bin: true\n      configmap_etc: true\n      deployment_server: true\n      deployment_rpc_server: true\n      ingress_server: true\n      job_db_init: true\n      job_db_sync: true\n      job_ks_endpoints: true\n      job_ks_service: true\n      job_ks_user: true\n      pdb_server: true\n      secret_db: true\n      secret_keystone: true\n      service_ingress_server: true\n      service_server: true\n\nNext, Neutron services like L3 routing, DHCP and metadata serving should be\nconsidered. If SDN provides its own implementation, the Neutron's default one\nshould be disabled:\n\n.. code-block:: yaml\n\n    manifests:\n      daemonset_dhcp_agent: false\n      daemonset_l3_agent: false\n      daemonset_metadata_agent: false\n\nProvision of those services should be included inside SDN chart.\n\nThe last thing to be considered is VM network virtualization. What engine does\nSDN use? 
Is it OpenVSwitch, Linux Bridges or L3 routing (no L2 connectivity)?\nIf SDN is using the OpenVSwitch, it can take advantage of existing OVS\ndaemonsets. Any modification that would be required to OVS manifests can be\nincluded in base Neutron chart as a configurable option. In that way, the features\nof OVS can be shared between different SDNs. When using the OVS, default Neutron\nL2 agent should be disabled, but OVS-DB and OVS-vswitchd can be left enabled.\n\n.. code-block:: yaml\n\n    manifests:\n      # Neutron L2 agent:\n      daemonset_ovs_agent: false\n      # OVS tool:\n      daemonset_ovs_db: true\n      daemonset_ovs_vswitchd: true\n\nSecurity Impact\n---------------\nNo security impact.\n\nPerformance Impact\n------------------\nVM networking performance would be dependent on the SDN used.\n\n\nAlternatives\n------------\nAlternatives to decomposable Neutron chart would be to copy whole Neutron chart\nand create spin-offs with new SDN enabled. This approach has drawbacks of\nmaintaining the whole neutron chart in many places, and copies of standard\nservices may be out of sync with OSH improvements. This implies constant\nmaintenance effort to stay up to date.\n\nImplementation\n==============\n\nAssignee(s)\n-----------\n\nPrimary assignees:\n\n* korzen (Artur Korzeniewski)\n* portdirect (Pete Birley)\n\n\nWork Items\n----------\n\n#. Implement decomposable Neutron chart\n#. Add Linux Bridge as first alternative for OVS - separate spec needed.\n#. Add one SDN to see if proposed change is working OK - separate spec needed.\n\n\nTesting\n=======\nFirst reasonable testing in gates would be to setup Linux Bridge and check\nif VM network connectivity is working.\n\nDocumentation Impact\n====================\nDocumentation of how new SDN can be enabled, how Neutron should be configured.\nAlso, for each new SDN that would be incorporated, the architecture overview\nshould be provided.\n\nReferences\n==========\n"
  },
  {
    "path": "doc/source/specs/nginx-sidecar.rst",
    "content": "=============\nNginx Sidecar\n=============\n\nBlueprint: https://blueprints.launchpad.net/openstack-helm/+spec/nginx-sidecar\n\nProblem Description\n===================\n\nIn a secured deployment, TLS certificates are used to protect the transports\namongst the various components.  In some cases, this requires additional\nmechanism to handle TLS offloading and to terminate the connection gracefully:\n\n* services do not handle TLS offloading and termination,\n* services whose native handling of TLS offloading and termination cause major\n  performance impact, for example, eventlet.\n\nProposed Change\n===============\n\nThis specification proposes to add a nginx sidecar container to the\npod for service that requires the tls offloading. The nginx can be used\nto handle the TLS offloading and terminate the TLS connection, and routes\nthe traffic to the service via localhost (127.0.0.1).\n\nSecurity Impact\n---------------\n\nThis enhances the system's security design by allowing pods with services that\ncannot natively manage TLS to secure the traffic to the service pod.\n\nPerformance Impact\n------------------\n\nThere is no significant performance impact as the traffic will be locally\nrouted (via 127.0.0.1) and may potentially improve performance for services\nwhose native TLS handling is inefficient.\n\nAlternatives\n------------\n\n* Instead of using nginx, haproxy can be used.\n\nImplementation\n==============\n\nAssignee(s)\n-----------\n\nPrimary assignee:\n  Pete Birley <pete@port.direct>\n\nWork Items\n----------\n\n* Update ``helm toolkit`` to provide snippet to create the nginx sidecar\n  container for the services that require it.\n* Update service charts to use the updated ``helm toolkit``.\n* Update relevant Documentation.\n\nTesting\n=======\n\nThe testing will be performed by the OpenStack-Helm gate to demonstrate\nthe sidecar container correctly routes traffic to the correct services.\n\nDocumentation 
Impact\n====================\n\nOpenStack-Helm documentation will be updated to indicate the usage of the\nnginx sidecar.\n"
  },
  {
    "path": "doc/source/specs/osh-1.0-requirements.rst",
    "content": "..\n This work is licensed under a Creative Commons Attribution 3.0 Unported\n License.\n\n http://creativecommons.org/licenses/by/3.0/legalcode\n\n..\n\n===============================\nOpenStack-Helm 1.0 Requirements\n===============================\n\nTopic:\nosh-1.0-requirements_\n\n.. _osh-1.0-requirements: https://review.openstack.org/#/q/topic:bp/osh-1.0-requirements\n\nProblem Description\n===================\n\nOpenStack-Helm has undergone rapid development and maturation over its\nlifetime, and is nearing the point of real-world readiness.  This spec\ndetails the functionality that must be implemented in OpenStack-Helm for it to\nbe considered ready for a 1.0 release, as well as for general use.\n\nUse case\n---------\nThis spec describes a point-in-time readiness for OpenStack-Helm 1.0,\nafter which it will be for historical reference only.\n\nProposed Change\n===============\n\nThe proposed requirements for a 1.0 release are as follows:\n\nGating\n------\nA foundational requirement of 1.0 readiness is the presence of robust gating\nthat will ensure functionality, backward compatibility, and upgradeability.\nThis will allow development to continue and for support for new versions of\nOpenStack to be added post-1.0.\nThe following gating requirements must be met:\n\n**Helm test for all charts**\n\nHelm test is the building block for all gating.  Each chart must integrate a\nhelm-test script which validates proper functionality.  This is already a\nmerge criterion for new charts, but a handful of older charts still need\nfor helm test functionality to be added.  
No additional charts will be merged\nprior to 1.0 unless they meet this requirement (and others in this document).\n\n**Resiliency across reboots**\n\nAll services should survive node reboots, and their functionality validated\nfollowing a reboot by a gate.\n\n**Upgrades**\n\nGating must prove that upgrades from each supported OpenStack version to the\nnext operate flawlessly, using the default image set (LOCI).  Specifically,\neach OpenStack chart should be upgraded from one release to the next, and\neach infrastructure service from one minor version to the next.  Both the\ncontainer image and configuration must be modified as part of this upgrade.\nAt minimum, Newton to Ocata upgrade must be validated for the 1.0 release.\n\nCode Completion and Refactoring\n-------------------------------\nA number of in-progress and planned development efforts must be completed\nprior to 1.0, to ensure a stable OpenStack-Helm interface thereafter.\n\n**Charts in the appropriate project**\n\nAll charts should migrate to their appropriate home project as follows:\n\n- OpenStack-Helm for OpenStack services\n- OpenStack-Helm-Infra for supporting services\n- OpenStack-Helm-Addons for ancillary services\n\nIn particular, these charts must move to OpenStack-Helm-Infra:\n\n- ceph\n- etcd\n- ingress\n- ldap\n- libvirt\n- mariadb\n- memcached\n- mongodb\n- openvswitch\n- postgresql\n- rabbitmq\n\n**Combined helm-toolkit**\n\nCurrently both OpenStack-Helm and OpenStack-Helm-Infra have their own parallel\nversions of the Helm-Toolkit library chart.  They must be combined into a\nsingle chart in OpenStack-Helm-Infra prior to 1.0.\n\n**Standardization of manifests**\n\nWork is underway to refactor common manifest patterns into reusable snippets\nin Helm-Toolkit.  
The following manifests have yet to be combined:\n\n- Database drop Job\n- Prometheus exporters\n- API Deployments\n- Worker Deployments\n- StatefulSets\n- CronJobs\n- Etc ConfigMaps\n- Bin ConfigMaps\n\n**Standardization of values**\n\nOpenStack-Helm has developed a number of conventions around the format and\nordering of charts' ``values.yaml`` file, in support of both reusable Helm-Toolkit\nfunctions and ease of developer ramp-up.  For 1.0 readiness, OpenStack-Helm must\ncement these conventions within a spec, as well as the ordering of ``values.yaml``\nkeys. These conventions must then be gated to guarantee conformity.\nThe spec in progress can be found here [1]_.\n\n**Inclusion of all core services**\n\nCharts for all core OpenStack services must be present to achieve 1.0\nreleasability.  The only core service outstanding at this time is Swift.\n\n**Split Ceph chart**\n\nThe monolithic Ceph chart does not allow for following Ceph upgrade best\npractices, namely to upgrade Mons, OSDs, and client services in that order.\nThe Ceph chart must therefore be split into at least three charts (one\nfor each of the above upgrade phases) prior to 1.0 to ensure smooth\nin-place upgradability.\n\n**Values-driven config files**\n\nIn order to maximize flexibility for operators, and to help facilitate\nupgrades to newer versions of containerized software without editing\nthe chart itself, all configuration files will be specified dynamically\nbased on ``values.yaml`` and overrides.  
In most cases the config files\nwill be generated based on the YAML values tree itself, and in some\ncases the config file content will be specified in ``values.yaml`` as a\nstring literal.\n\nDocumentation\n-------------\nComprehensive documentation is key to the ability for real-world operators to\nbenefit from OpenStack-Helm, and so it is a requirement for 1.0 releasability.\nThe following outstanding items must be completed from a documentation\nperspective:\n\n**Document version requirements**\n\nVersion requirements for the following must be documented and maintained:\n\n- Kubernetes\n- Helm\n- Operating system\n- External charts (Calico)\n\n**Document Kubernetes requirements**\n\nOpenStack-Helm supports a \"bring your own Kubernetes\" paradigm.  Any\nparticular k8s configuration or feature requirements must be\ndocumented.\n\n- Hosts must use KubeDNS / CoreDNS for resolution\n- Kubernetes must enable mount propagation (until it is enabled by default)\n- Helm must be installed\n\nExamples of how to set up the above under KubeADM and KubeSpray-based clusters\nmust be documented as well.\n\n**OpenStack-Helm release process**\n\nThe OpenStack-Helm release process will be somewhat orthogonal to the\nOpenStack release process, and the differences and relationship between the\ntwo must be documented in a spec.  This will help folks quickly understand why\nOpenStack-Helm is a Release-Independent project from an OpenStack perspective.\n\n**Release notes**\n\nRelease notes for the 1.0 release must be prepared, following OpenStack\nbest practices.  
The criteria for future changes that should be included\nin release notes in an ongoing fashion must be defined / documented as well.\n\n- ``values.yaml`` changes\n- New charts\n- Any other changes to the external interface of OpenStack-Helm\n\n**LMA Operations Guide**\n\nA basic Logging, Monitoring, and Alerting-oriented operations guide must be in\nplace, illustrating for operators (and developers) how to set up and use an\nexample LMA setup for OpenStack and supporting services.  It will include\ninstructions on how to perform basic configuration and how to access and use\nthe user interfaces at a high level.  It will also link out to more detailed\ndocumentation for the LMA tooling itself.\n\nProcess and Tooling\n-------------------\nTo facilitate effective collaboration and communication across the\nOpenStack-Helm community team, work items for the enhancements above will be\ncaptured in Storyboard.  Therefore, migration from Launchpad to Storyboard\nmust be accomplished prior to the 1.0 release.  
Going forward, Storyboard\nwill be leveraged as a tool to collaboratively define and communicate the\nOpenStack-Helm roadmap.\n\nSecurity Impact\n---------------\nNo impact\n\nPerformance Impact\n------------------\nNo impact\n\nAlternatives\n------------\nThis spec lays out the criteria for a stable and reliable 1.0 release, which\ncan serve as the basis for real-world use as well as ongoing development.\nThe alternative approaches would be to either iterate indefinitely without\ndefining a 1.0 release, which would fail to signal to operators the point at\nwhich the platform is ready for real-world use; or, to define a 1.0 release\nwhich fails to satisfy key features which real-world operators need.\n\nImplementation\n==============\n\nThis spec describes a wide variety of self-contained work efforts, which will\nbe implemented individually by the whole OpenStack-Helm team.\n\nAssignee(s)\n-----------\n\nPrimary assignee:\n\n- mattmceuen (Matt McEuen <matt.mceuen@att.com>) for coordination\n- powerds (DaeSeong Kim <daeseong.kim@sk.com>) for the\n  ``values.yaml`` ordering spec [1]_\n- portdirect (Pete Birley <pete@port.direct>) for the\n  release management spec [2]_\n- randeep.jalli (Randeep Jalli <rj2083@att.com>) and\n  renmak (Renis Makadia <renis.makadia@att.com>) for splitting\n  up the Ceph chart\n- rwellum (Rich Wellum <richwellum@gmail.com>) for coordination\n  of Storyboard adoption\n- Additional assignees TBD\n\nWork Items\n----------\n\nSee above for the list of work items.\n\nTesting\n=======\nSee above for gating requirements.\n\nDocumentation Impact\n====================\nSee above for documentation requirements.\n\nReferences\n==========\n\n.. [1] https://review.openstack.org/#/c/552485/\n.. [2] TODO - release management spec\n"
  },
  {
    "path": "doc/source/specs/osh-lma-stack.rst",
    "content": "..\n This work is licensed under a Creative Commons Attribution 3.0 Unported\n License.\n\n http://creativecommons.org/licenses/by/3.0/legalcode\n\n..\n\n=====================================\nOSH Logging, Monitoring, and Alerting\n=====================================\n\nBlueprints:\n1. osh-monitoring_\n2. osh-logging-framework_\n\n.. _osh-monitoring: https://blueprints.launchpad.net/openstack-helm/+spec/osh-monitoring\n.. _osh-logging-framework: https://blueprints.launchpad.net/openstack-helm/+spec/osh-logging-framework\n\n\nProblem Description\n===================\n\nOpenStack-Helm currently lacks a centralized mechanism for providing insight\ninto the performance of the OpenStack services and infrastructure components.\nThe log formats of the different components in OpenStack-Helm vary, which makes\nidentifying causes for issues difficult across services.  To support operational\nreadiness by default, OpenStack-Helm should include components for logging\nevents in a common format, monitoring metrics at all levels, alerting and alarms\nfor those metrics, and visualization tools for querying the logs and metrics in\na single pane view.\n\n\nPlatform Requirements\n=====================\n\nLogging Requirements\n--------------------\n\nThe requirements for a logging platform include:\n\n1. All services in OpenStack-Helm log to stdout and stderr by default\n2. Log collection daemon runs on each node to forward logs to storage\n3. Proper directories mounted to retrieve logs from the node\n4. Ability to apply custom metadata and uniform format to logs\n5. Time-series database for logs collected\n6. Backed by highly available storage\n7. Configurable log rotation mechanism\n8. Ability to perform custom queries against stored logs\n9. Single pane visualization capabilities\n\nMonitoring Requirements\n-----------------------\n\nThe requirements for a monitoring platform include:\n\n1. Time-series database for collected metrics\n2. 
Backed by highly available storage\n3. Common method to configure all monitoring targets\n4. Single pane visualization capabilities\n5. Ability to perform custom queries against metrics collected\n6. Alerting capabilities to notify operators when thresholds exceeded\n\n\nUse Cases\n=========\n\nLogging Use Cases\n-----------------\n\nExample uses for centralized logging include:\n\n1. Record compute instance behavior across nodes and services\n2. Record OpenStack service behavior and status\n3. Find all backtraces for a tenant id's uuid\n4. Identify issues with infrastructure components, such as RabbitMQ, mariadb, etc\n5. Identify issues with Kubernetes components, such as: etcd, CNI, scheduler, etc\n6. Organizational auditing needs\n7. Visualize logged events to determine if an event is recurring or an outlier\n8. Find all logged events that match a pattern (service, pod, behavior, etc)\n\nMonitoring Use Cases\n--------------------\n\nExample OpenStack-Helm metrics requiring monitoring include:\n\n1. Host utilization: memory usage, CPU usage, disk I/O, network I/O, etc\n2. Kubernetes metrics: pod status, replica availability, job status, etc\n3. Ceph metrics: total pool usage, latency, health, etc\n4. OpenStack metrics: tenants, networks, flavors, floating IPs, quotas, etc\n5. Proactive monitoring of stack traces across all deployed infrastructure\n\nExamples of how these metrics can be used include:\n\n1. Add or remove nodes depending on utilization\n2. Trigger alerts when desired replicas fall below required number\n3. Trigger alerts when services become unavailable or unresponsive\n4. Identify etcd performance that could lead to cluster instability\n5. Visualize performance to identify trends in traffic or utilization over time\n\nProposed Change\n===============\n\nLogging\n-------\n\nFluentd, Elasticsearch, and Kibana meet OpenStack-Helm's logging requirements\nfor capture, storage and visualization of logged events.  
Fluentd runs as a\ndaemonset on each node and mounts the /var/lib/docker/containers directory.\nThe Docker container runtime engine directs events posted to stdout and stderr\nto this directory on the host.  Fluentd should then declare the contents of\nthat directory as an input stream, and use the fluent-plugin-elasticsearch\nplugin to apply the Logstash format to the logs.  Fluentd will also use the\nfluentd-plugin-kubernetes-metadata plugin to write Kubernetes metadata to the\nlog record.  Fluentd will then forward the results to Elasticsearch, which\nindexes the logs in a logstash-* index by default.  The resulting logs can then\nbe queried directly through Elasticsearch, or they can be viewed via Kibana.\nKibana offers a dashboard that can create custom views on logged events, and\nKibana integrates well with Elasticsearch by default.\n\nThe proposal includes the following:\n\n1. Helm chart for Fluentd\n2. Helm chart for Elasticsearch\n3. Helm chart for Kibana\n\nAll three charts must include sensible configuration values to make the\nlogging platform usable by default.  These include: proper input configurations\nfor Fluentd, proper metadata and formats applied to the logs via Fluentd,\nsensible indexes created for Elasticsearch, and proper configuration values for\nKibana to query the Elasticsearch indexes previously created.\n\nMonitoring\n----------\n\nPrometheus and Grafana meet OpenStack-Helm's monitoring requirements.  The\nPrometheus monitoring tool provides the ability to scrape targets for metrics\nover HTTP, and it stores these metrics in Prometheus's time-series database.\nThe monitoring targets can be discovered via static configuration in Prometheus\nor through service discovery.  Prometheus includes a querying language that\nprovides meaningful queries against the metrics gathered and supports the\ncreation of rules to measure these metrics against for alerting purposes.  
It\nalso supports a wide range of Prometheus exporters for existing services,\nincluding Ceph and OpenStack.  Grafana supports Prometheus as a data source, and\nprovides the ability to view the metrics gathered by Prometheus in a single pane\ndashboard.  Grafana can be bootstrapped with dashboards for each target scraped,\nor the dashboards can be added via Grafana's web interface directly.  To meet\nOpenStack-Helm's alerting needs, Alertmanager can be used to interface with\nPrometheus and send alerts based on Prometheus rule evaluations.\n\nThe proposal includes the following:\n\n1. Helm chart for Prometheus\n2. Helm chart for Alertmanager\n3. Helm chart for Grafana\n4. Helm charts for any appropriate Prometheus exporters\n\nAll charts must include sensible configuration values to make the monitoring\nplatform usable by default.  These include:  static Prometheus configurations\nfor the included exporters, static dashboards for Grafana mounted via configMaps\nand configurations for Alertmanager out of the box.\n\nSecurity Impact\n---------------\n\nAll services running within the platform should be subject to the\nsecurity practices applied to the other OpenStack-Helm charts.\n\nPerformance Impact\n------------------\n\nTo minimize the performance impacts, the following should be considered:\n\n1. Sane defaults for log retention and rotation policies\n2. Identify opportunities for improving Prometheus's operation over time\n3. Elasticsearch configured to prevent memory swapping to disk\n4. Elasticsearch configured in a highly available manner with sane defaults\n\n\nImplementation\n==============\n\nAssignee(s)\n-----------\n\nPrimary assignees:\n  srwilker (Steve Wilkerson)\n  portdirect (Pete Birley)\n  lr699s (Larry Rensing)\n\n\nWork Items\n----------\n\n1. Fluentd chart\n2. Elasticsearch chart\n3. Kibana chart\n4. Prometheus chart\n5. Alertmanager chart\n6. Grafana chart\n7. 
Charts for exporters: kube-state-metrics, ceph-exporter, openstack-exporter?\n\nAll charts should follow design approaches applied to all other OpenStack-Helm\ncharts, including the use of helm-toolkit.\n\nAll charts require valid and sensible default values to provide operational\nvalue out of the box.\n\nTesting\n=======\nTesting should include Helm tests for each of the included charts as well as an\nintegration test in the gate.\n\n\nDocumentation Impact\n====================\nDocumentation should be included for each of the included charts as well as\ndocumentation detailing the requirements for a usable monitoring platform,\npreferably with sane default values out of the box.\n"
  },
  {
    "path": "doc/source/specs/support-OCI-image-registry-with-authentication-turned-on.rst",
    "content": "..\n This work is licensed under a Creative Commons Attribution 3.0 Unported\n License.\n\n http://creativecommons.org/licenses/by/3.0/legalcode\n\n..\n\n========================================================\nSupport OCI image registry with authentication turned on\n========================================================\n\nBlueprint:\nsupport-oci-image-registry-with-authentication-turned-on_\n\n.. _support-oci-image-registry-with-authentication-turned-on: https://blueprints.launchpad.net/openstack-helm/+spec/support-oci-image-registry-with-authentication-turned-on\n\nProblem Description\n===================\nIn the current openstack-helm, all charts provide an ``images:`` section in\ntheir ``values.yaml`` that have the container images references. By default,\nthe container images are all downloaded from a registry hosted by Docker or Quay.\nHowever, the image references can be overridden by operators to download images\nfrom any OCI image registry. In the case that the OCI image registry has\nauthentication turned on, kubelet would fail to download the images because the\ncurrent Openstack-Helm does not provide a way to pass the OCI image registry\ncredentials to kubernetes when pulling images.\n\n\nUse case\n========\nOperators should be able to use Openstack-Helm to deploy containerized openstack\nservices with a docker registry that has authentication turned on.\n\n\nProposed Change\n===============\nTo be able to pull images from an OCI image registry which has the authentication\nturned on, kubernetes needs credentials. For each chart, a new ``endpoints:``\nentry could be added in ``values.yaml`` to provide image credentials, a secret\nneeds to be generated to hold the credentials and the ``imagePullSecrets:`` field\nshould be added in each service account to specify which secret should be used\nto get the credentials from when pulling images by kubelet.\n\nThe detailed proposed changes are described as follows:\n\n1. 
For each chart, add a new entry ``oci_image_registry:`` under ``endpoints:`` in\n``values.yaml``. The entry ``oci_image_registry:`` has the ``auth:`` section which\nprovides the credentials for accessing registry images and an option ``enabled:``\nto determine whether images authentication is required or not. The registry basic\ninformation would also be included for generating the registry URL by the endpoint\nlookup functions. Also add a new entry ``oci_image_registry:`` under ``secrets:``\nto indicate the secret name. In order to create the secret that holds the provided\ncredentials, add a new component ``secret_registry`` in ``manifests:`` section.\nFor example:\n\n.. code-block:: yaml\n\n   secrets:\n     oci_image_registry:\n       nova: nova-oci-image-registry-key\n\n   endpoints:\n     ...\n     oci_image_registry:\n       name: oci-image-registry\n       namespace: oci-image-registry\n       auth:\n         enabled: false\n         nova:\n           username: nova\n           password: password\n       hosts:\n         default: localhost\n       host_fqdn_override:\n         default: null\n       port:\n         registry:\n           default: 5000\n\n   manifests:\n     secret_registry: true\n\nThe option ``enabled:`` under ``auth:`` and the manifest ``secret_registry:``\nprovide the ability for operator to determine whether they would like to have\nsecrets generated and passed to kubernetes for pulling images.\n\nThe secret would not be created with the default option ``enabled: false`` and\n``secret_registry: true``. To enable secret creation, operator should override\n``enabled:`` to true. The above example shows the default credentials, operator\nshould override the ``username:`` and ``password:`` under ``auth:`` section to\nprovide their own credentials.\n\nThen, add manifest ``secret-registry.yaml`` in ``templates/`` to leverage\nthe function that will be added in helm-toolkit to create the secret. For example:\n\n.. 
code-block:: yaml\n\n   {{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n   {{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n   {{- end }}\n\n2. Add a helm-toolkit function ``helm-toolkit.manifests.secret_registry`` to create a\n   manifest for secret generation. For example:\n\n.. code-block:: rst\n\n   {{- define \"helm-toolkit.manifests.secret_registry\" -}}\n   {{- $envAll := index . \"envAll\" }}\n   {{- $registryUser := index . \"registryUser\" }}\n   {{- $secretName := index $envAll.Values.secrets.oci_image_registry $registryUser }}\n   {{- $registryHost := tuple \"oci_image_registry\" \"internal\" $envAll | include \"helm-toolkit.endpoints.endpoint_host_lookup\" }}\n   {{- $registryPort := tuple \"oci_image_registry\" \"internal\" \"registry\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n   {{- $imageCredentials := index $envAll.Values.endpoints.oci_image_registry.auth $registryUser }}\n   {{- $dockerAuthToken := printf \"%s:%s\" $imageCredentials.username $imageCredentials.password | b64enc }}\n   {{- $dockerAuth := printf \"{\\\"auths\\\": {\\\"%s:%s\\\": {\\\"auth\\\": \\\"%s\\\"}}}\" $registryHost $registryPort $dockerAuthToken | b64enc }}\n   ---\n   apiVersion: v1\n   kind: Secret\n   metadata:\n     name: {{ $secretName }}\n   type: kubernetes.io/dockerconfigjson\n   data:\n     .dockerconfigjson: {{ $dockerAuth }}\n   {{- end }}\n\n3. Reference the created secret by adding the ``imagePullSecrets:`` field to ServiceAccount\n   resource template [2]_ in ``helm-toolkit/snippets/_kubernetes_pod_rbac_serviceaccount.tpl``.\n   To handle it as optional, the field is wrapped in a conditional. For example,\n\n.. 
code-block:: yaml\n\n   ---\n   apiVersion: v1\n   kind: ServiceAccount\n   ...\n   {{- if $envAll.Values.endpoints.oci_image_registry.auth.enabled }}\n   imagePullSecrets:\n     - name: {{ index $envAll.Values.secrets.oci_image_registry $envAll.Chart.Name }}\n   {{- end }}\n\nIf .Values.endpoints.oci_image_registry.auth.enabled is set to true, then any\ncontainers created with the current service account will have the ``imagePullSecrets``\nautomatically added to their spec and the secret will be passed to kubelet to be\nused for pulling images.\n\n\nSecurity Impact\n---------------\nThe credentials for the registry could be exposed by running the kubectl command:\nkubectl get secret <secret-name> --output=\"jsonpath={.data.\\.dockerconfigjson}\" | base64 --decode\n\nAuthentication should be enabled for normal users to access Kube API server via\neither kubectl command or kube REST API call.\n\n\nPerformance Impact\n------------------\nNo performance impact\n\n\nAlternatives\n------------\nBefore using Openstack-Helm to deploy openstack services,\n\n1. Put .docker/config.json in docker/kubelet root directory on all nodes\n2. Pre-pulling images on all nodes\n\nBut the above alternatives have limitations and security impact, i.e. they require root access\nto configure on all nodes, all pods can read any configured private registries, all pods\ncan use any images cached on a node [1]_\n\n\nImplementation\n==============\n\nAssignee(s)\n-----------\n\nPrimary assignees:\n\n* Angie Wang (angiewang)\n\n\nWork Items\n----------\n#. Provide the credentials and add the manifest across all charts in OSH and OSH-infra\n#. Update helm-toolkit to provide manifest to create secret for registry authentication\n#. Update helm-toolkit serviceaccount template to pass the secret in a conditional\n\n\nTesting\n=======\nNone\n\nDocumentation Impact\n====================\nDocumentation of how to enable the registry secret generation\n\n\nReferences\n==========\n.. 
[1] https://kubernetes.io/docs/concepts/containers/images\n.. [2] https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account\n"
  },
  {
    "path": "doc/source/specs/support-linux-bridge-on-neutron.rst",
    "content": "..\n This work is licensed under a Creative Commons Attribution 3.0 Unported\n License.\n\n http://creativecommons.org/licenses/by/3.0/legalcode\n\n..\n\n==========================================\nSupport linux bridge on neutron helm chart\n==========================================\n\nBlueprint:\nsupport-linux-bridge-on-neutron_\n\n.. _support-linux-bridge-on-neutron: https://blueprints.launchpad.net/openstack-helm/+spec/support-linux-bridge-on-neutron\n\nProblem Description\n===================\n\nThis specification will address enablement of LinuxBridge network virtualization\nfor OpenStack Helm (OSH). LinuxBridge is second available networking technology\nin Neutron's reference architecture. The first one is OVS, that is already\nimplemented in OSH.\n\nThe LinuxBridge (LB) is Neutron's L2 agent, using linux kernel bridges as network\nconfiguration for VMs. Both OVS and LB are part of Neutron's Modular Layer 2 (ML2)\nframework, allowing to simultaneously utilize the variety of layer 2 networking\ntechnologies.\n\nOther services inside Neutron reference stack (L3/DHCP/metadata agents) are\ndependent on L2 connectivity agent. Thus, replacing OVS with LB would cause\nchanges in mentioned services configuration.\n\nProposed Change\n===============\n\nLinuxBridge installation with neutron chart takes advantaged of decomposable\nneutron chart in OSH. LinuxBridge agent will be added as daemonset, similarly\nhow OVS is implemented. New value :code:`daemonset_lb_agent` should be added in\n:code:`neutron/values.yaml` in :code:`manifests` section:\n\n.. 
code-block:: yaml\n\n    manifests:\n      (...)\n      daemonset_dhcp_agent: true\n      daemonset_l3_agent: true\n      daemonset_lb_agent: false\n      daemonset_metadata_agent: true\n      daemonset_ovs_agent: true\n      daemonset_ovs_db: true\n      daemonset_ovs_vswitchd: true\n      (...)\n\nBy default, :code:`daemonset_lb_agent` will be set to false to remain default\nbehaviour of installing OVS as networking agent.\n\nInstalling OVS requires Kubernetes worker node labeling with tag\n:code:`openvswitch=enabled`. To mark nodes where LB should be used, new tag\nwill be introduced: :code:`linuxbridge=enabled`.\n\nLinuxBridge should support external bridge configuration, as well as auto\nbridge add mechanism implemented for OVS.\n\nAs mentioned before, configuration of L3/DHCP/metadata agent should be adjusted\nto use LinuxBridge, sample configuration override:\n\n.. code-block:: yaml\n\n    conf:\n      neutron:\n        default:\n          agent:\n            interface_driver: linuxbridge\n      ml2_conf:\n        ml2_type_flat:\n          neutron:\n            ml2:\n              mechanism_drivers: linuxbridge, l2population\n      dhcp_agent:\n        default:\n          neutron:\n            base:\n              agent:\n                interface_driver: linuxbridge\n      l3_agent:\n        default:\n          neutron:\n            base:\n              agent:\n                interface_driver: linuxbridge\n\nHaving services configured, also the services pod dependencies should be\nupdated to reflect the new kind on L2 agent:\n\n.. 
code-block:: yaml\n\n    dependencies:\n      dhcp:\n        pod:\n          - requireSameNode: true\n            labels:\n              application: neutron\n              component: neutron-lb-agent\n      metadata:\n        pod:\n          - requireSameNode: true\n            labels:\n              application: neutron\n              component: neutron-lb-agent\n      l3:\n        pod:\n          - requireSameNode: true\n            labels:\n              application: neutron\n              component: neutron-lb-agent\n\nLinuxBridge should be also enabled in :code:`manifests` section:\n\n.. code-block:: yaml\n\n    manifests:\n      daemonset_lb_agent: true\n      daemonset_ovs_agent: false\n      daemonset_ovs_db: false\n      daemonset_ovs_vswitchd: false\n\nIn above example OVS and Neutron OVS agent are disabled.\n\nAnother place where Neutron L2 agent should be pointed is dependencies list\nin other OpenStack projects. Currently, :code:`nova-compute` has dependency for\n:code:`ovs-agent` in :code:`nova/values.yaml`, it should be changed to:\n\n.. code-block:: yaml\n\n    dependencies:\n      compute:\n        daemonset:\n        - lb-agent\n\nSecurity Impact\n---------------\nNo security impact.\n\nPerformance Impact\n------------------\nVM networking performance would be dependent on linux bridge implementation.\n\nAlternatives\n------------\nOVS is an alternative in Neutron reference architecture. It is already in tree.\n\nImplementation\n==============\n\nAssignee(s)\n-----------\n\nPrimary assignees:\n\n* korzen (Artur Korzeniewski)\n\n\nWork Items\n----------\n\n#. Add LinuxBridge daemonset\n#. Add gate job testing VM network connectivity\n#. Add documentation on how to use LinuxBridge\n\nTesting\n=======\nGate job testing VM network connectivity.\n\nDocumentation Impact\n====================\nDocumentation on how to use LinuxBridge with Neutron chart.\n\nReferences\n==========\n"
  },
  {
    "path": "doc/source/specs/tenant-ceph.rst",
    "content": "================================\nDeploying multuple Ceph clusters\n================================\n\nThis guide shows how to setup multiple Ceph clusters. One Ceph cluster will be\nused for k8s RBD storage and while other Ceph cluster will be for tenant facing\nstorage backend for Cinder and Glance.\n\nCeph Clusters:\n==============\n\nCeph for RBD:\n-------------\n\nThis Ceph cluster will be used for k8s RBD storage (pvc). This can be used by\nentire Kubernetes cluster.\n\n- k8s namespace: ceph\n- mon endpoint port: 6789\n- mgr endpoint port: 7000\n- metric port: 9283\n- storage classes: general (rbd based for pvc)\n- no ceph-mds and ceph-rgw\n\nCeph for Tenant:\n----------------\n\nThis Ceph cluster will be used by Cinder and Glance as storage backend.\n\n- k8s namespace: tenant-ceph\n- mon endpoint port: 6790\n- mgr endpoint port: 7001\n- metric port: 9284\n- no storage classes\n- no ceph-mds\n\nEnv Setup:\n==========\n6 VM based hosts (node1, node2, node3, node4, node5, node6)\n\nk8s node labels:\n----------------\n``Ceph for RBD related labels:``\n\nLabels assigned to nodes: node1, node2, node3:\n\nopenstack-control-plane=enabled,\nceph-mon=enabled,\nceph-mgr=enabled,\nceph-rgw=enabled,\nceph-mds=enabled,\nceph-osd=enabled\n\n``Ceph for Tenant related labels:``\n\nLabels assigned to nodes: node1, node2, node3:\n\ntenant-ceph-control-plane=enabled,\nceph-mon-tenant=enabled,\nceph-mgr-tenant=enabled,\nceph-rgw-tenant=enabled\n\nLabels assigned to nodes: node4, node5, node6:\n\nopenstack-data-plane=enabled,\nopenstack-compute-node=enabled,\nceph-osd-tenant=enabled,\nopenstack-data-plane=enabled\n\n\n\n``k8s node list with labels``\nAfter applying above labels, node labels should look like following.\n\n.. 
code-block:: console\n\n  ubuntu@node1:~$ kubectl get nodes --show-labels=true\n  NAME      STATUS    ROLES     AGE       VERSION   LABELS\n  node1     Ready     <none>    9m        v1.10.6   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,ceph-mds=enabled,ceph-mgr-tenant=enabled,ceph-mgr=enabled,ceph-mon-tenant=enabled,ceph-mon=enabled,ceph-osd=enabled,ceph-rgw-tenant=enabled,ceph-rgw=enabled,kubernetes.io/hostname=node1,linuxbridge=enabled,openstack-control-plane=enabled,openstack-helm-node-class=primary,openvswitch=enabled,tenant-ceph-control-plane=enabled\n  node2     Ready     <none>    6m        v1.10.6   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,ceph-mds=enabled,ceph-mgr-tenant=enabled,ceph-mgr=enabled,ceph-mon-tenant=enabled,ceph-mon=enabled,ceph-osd=enabled,ceph-rgw-tenant=enabled,ceph-rgw=enabled,kubernetes.io/hostname=node2,linuxbridge=enabled,openstack-control-plane=enabled,openstack-helm-node-class=general,openvswitch=enabled,tenant-ceph-control-plane=enabled\n  node3     Ready     <none>    6m        v1.10.6   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,ceph-mds=enabled,ceph-mgr-tenant=enabled,ceph-mgr=enabled,ceph-mon-tenant=enabled,ceph-mon=enabled,ceph-osd=enabled,ceph-rgw-tenant=enabled,ceph-rgw=enabled,kubernetes.io/hostname=node3,linuxbridge=enabled,openstack-control-plane=enabled,openstack-helm-node-class=general,openvswitch=enabled,tenant-ceph-control-plane=enabled\n  node4     Ready     <none>    7m        v1.10.6   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,ceph-osd-tenant=enabled,kubernetes.io/hostname=node4,linuxbridge=enabled,openstack-compute-node=enabled,openstack-data-plane=enabled,openstack-helm-node-class=general,openvswitch=enabled\n  node5     Ready     <none>    6m        v1.10.6   
beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,ceph-osd-tenant=enabled,kubernetes.io/hostname=node5,linuxbridge=enabled,openstack-compute-node=enabled,openstack-data-plane=enabled,openstack-helm-node-class=general,openvswitch=enabled\n  node6     Ready     <none>    6m        v1.10.6   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,ceph-osd-tenant=enabled,kubernetes.io/hostname=node6,linuxbridge=enabled,openstack-compute-node=enabled,openstack-data-plane=enabled,openstack-helm-node-class=general,openvswitch=enabled\n\n\nTest Steps:\n===========\n\n1) Prepare scripts:\n-------------------\n\nOpenStack-Helm multinode guide includes scripts which are used to specify\noverrides and deploy charts.\n\nDuplicate scripts as shows below for later use.\n\n.. code-block:: console\n\n  cd tools/deployment/multinode/\n  cp 030-ceph.sh 030-tenant-ceph.sh\n  cp 040-ceph-ns-activate.sh 040-tenant-ceph-ns-activate.sh\n  cp 090-ceph-radosgateway.sh 090-tenant-ceph-radosgateway.sh\n\n\n2) Deploy ingress chart:\n------------------------\n\nScript to update and execute: ``020-ingress.sh``\n\nUpdate script to include namespace ``tenant-ceph`` as shown\nbelow.\n\n.. code-block:: yaml\n\n  for NAMESPACE in openstack ceph tenant-ceph; do\n\nExecute script.\n\n3) Deploy Ceph for RBD:\n-----------------------\n\nScript to update and execute: ``030-ceph.sh``\n\nUpdate script with following overrides. Note: The original RBD provisioner\nis now deprecated. The CSI RBD provisioner is selected here. If you prefer\nthe original non-CSI RBD provisioner, then set rbd_provisioner to true instead.\n\n.. 
code-block:: yaml\n\n  deployment:\n    storage_secrets: true\n    ceph: true\n    rbd_provisioner: false\n    csi_rbd_provisioner: true\n    cephfs_provisioner: false\n    client_secrets: false\n  endpoints:\n    ceph_mon:\n      namespace: ceph\n      port:\n        mon:\n          default: 6789\n    ceph_mgr:\n      namespace: ceph\n      port:\n        mgr:\n          default: 7000\n        metrics:\n          default: 9283\n  manifests:\n    deployment_mds: false\n  bootstrap:\n    enabled: true\n  conf:\n    pool:\n      target:\n        osd: 3\n  storageclass:\n    rbd:\n      ceph_configmap_name: ceph-etc\n    cephfs:\n      provision_storage_class: false\n  ceph_mgr_modules_config:\n    prometheus:\n      server_port: 9283\n  monitoring:\n    prometheus:\n      enabled: true\n      ceph_mgr:\n        port: 9283\n\n.. note::\n  ``cephfs_provisioner: false`` and ``provision_storage_class: false`` are set\n  to false to disable cephfs.\n  ``deployment_mds: false`` is set to disable ceph-mds\n\nExecute script.\n\n4) Deploy MariaDB, RabbitMQ, Memcached and Keystone:\n----------------------------------------------------\n\nUse default overrides and execute following scripts as per OSH guide steps:\n\n- ``040-ceph-ns-activate.sh``\n- ``050-mariadb.sh``\n- ``060-rabbitmq.sh``\n- ``070-memcached.sh``\n- ``080-keystone.sh``\n\n\nResult from Steps 2, 3, 4:\n--------------------------\n\n``Ceph Pods``\n\n.. 
code-block:: console\n\n  ubuntu@node1:~$  kubectl get pods -n ceph -o wide\n  NAME                                    READY     STATUS      RESTARTS   AGE       IP              NODE\n  ceph-bootstrap-g45qc                    0/1       Completed   0          28m       192.168.5.16    node3\n  ceph-mds-keyring-generator-gsw4m        0/1       Completed   0          28m       192.168.2.11    node2\n  ceph-mgr-5746dd89db-mmrg4               1/1       Running     0          23m       10.0.0.12       node2\n  ceph-mgr-5746dd89db-q25lt               1/1       Running     0          23m       10.0.0.9        node3\n  ceph-mgr-keyring-generator-t4s8l        0/1       Completed   0          28m       192.168.2.9     node2\n  ceph-mon-6n4hk                          1/1       Running     0          28m       10.0.0.9        node3\n  ceph-mon-b2d9w                          1/1       Running     0          28m       10.0.0.12       node2\n  ceph-mon-check-d85994946-2dcpg          1/1       Running     0          28m       192.168.5.17    node3\n  ceph-mon-keyring-generator-rmvfz        0/1       Completed   0          28m       192.168.2.10    node2\n  ceph-mon-svkdl                          1/1       Running     0          28m       10.0.0.16       node1\n  ceph-osd-default-83945928-2mhrj         1/1       Running     0          25m       10.0.0.9        node3\n  ceph-osd-default-83945928-gqbd9         1/1       Running     0          25m       10.0.0.16       node1\n  ceph-osd-default-83945928-krrl8         1/1       Running     0          25m       10.0.0.12       node2\n  ceph-osd-keyring-generator-zg8s5        0/1       Completed   0          28m       192.168.0.195   node1\n  ceph-rbd-pool-92nbv                     0/1       Completed   0          23m       192.168.5.18    node3\n  ceph-rbd-provisioner-599895579c-jl6qk   1/1       Running     0          21m       192.168.2.15    node2\n  ceph-rbd-provisioner-599895579c-n4hbk   1/1       Running     0          21m       
192.168.5.19    node3\n  ceph-rgw-keyring-generator-2wv4j        0/1       Completed   0          28m       192.168.5.15    node3\n  ceph-storage-keys-generator-8vzrx       0/1       Completed   0          28m       192.168.2.12    node2\n  ingress-796d8cf8d6-9khkm                1/1       Running     0          28m       192.168.2.6     node2\n  ingress-796d8cf8d6-nznvc                1/1       Running     0          28m       192.168.5.12    node3\n  ingress-error-pages-54454dc79b-bgc5m    1/1       Running     0          28m       192.168.2.5     node2\n  ingress-error-pages-54454dc79b-hwnv4    1/1       Running     0          28m       192.168.5.7     node3\n\n``Openstack Pods:``\n\n.. code-block:: console\n\n  ubuntu@node1:~$ kubectl get pods -n openstack -o wide\n  NAME                                                READY     STATUS      RESTARTS   AGE       IP              NODE\n  ceph-openstack-config-ceph-ns-key-generator-mcxrs   0/1       Completed   0          11m       192.168.2.16    node2\n  ingress-7b4bc84cdd-7wslz                            1/1       Running     0          30m       192.168.5.5     node3\n  ingress-7b4bc84cdd-z6t2z                            1/1       Running     0          30m       192.168.2.4     node2\n  ingress-error-pages-586c7f86d6-7m58l                1/1       Running     0          30m       192.168.5.6     node3\n  ingress-error-pages-586c7f86d6-n9tzv                1/1       Running     0          30m       192.168.2.3     node2\n  keystone-api-7974676d5d-5k27d                       1/1       Running     0          6m        192.168.5.24    node3\n  keystone-api-7974676d5d-cd9kv                       1/1       Running     0          6m        192.168.2.21    node2\n  keystone-bootstrap-twfrj                            0/1       Completed   0          6m        192.168.0.197   node1\n  keystone-credential-setup-txf5p                     0/1       Completed   0          6m        192.168.5.25    node3\n  
keystone-db-init-tjxgm                              0/1       Completed   0          6m        192.168.2.20    node2\n  keystone-db-sync-zl9t4                              0/1       Completed   0          6m        192.168.2.22    node2\n  keystone-domain-manage-thwdm                        0/1       Completed   0          6m        192.168.0.198   node1\n  keystone-fernet-setup-qm424                         0/1       Completed   0          6m        192.168.5.26    node3\n  keystone-rabbit-init-6699r                          0/1       Completed   0          6m        192.168.2.23    node2\n  keystone-test                                       0/1       Completed   0          4m        192.168.3.3     node4\n  mariadb-ingress-84894687fd-wfc9b                    1/1       Running     0          11m       192.168.2.17    node2\n  mariadb-ingress-error-pages-78fb865f84-bg8sg        1/1       Running     0          11m       192.168.5.20    node3\n  mariadb-server-0                                    1/1       Running     0          11m       192.168.5.22    node3\n  memcached-memcached-5db74ddfd5-m5gw2                1/1       Running     0          7m        192.168.2.19    node2\n  rabbitmq-rabbitmq-0                                 1/1       Running     0          8m        192.168.2.18    node2\n  rabbitmq-rabbitmq-1                                 1/1       Running     0          8m        192.168.5.23    node3\n  rabbitmq-rabbitmq-2                                 1/1       Running     0          8m        192.168.0.196   node1\n\n``Ceph Status``\n\n.. 
code-block:: console\n\n  ubuntu@node1:~$ kubectl exec -n ceph ceph-mon-b2d9w -- ceph -s\n    cluster:\n      id:     3e53e3b7-e5d9-4bab-9701-134687f4954e\n      health: HEALTH_OK\n\n    services:\n      mon: 3 daemons, quorum node3,node2,node1\n      mgr: node3(active), standbys: node2\n      osd: 3 osds: 3 up, 3 in\n\n    data:\n      pools:   18 pools, 93 pgs\n      objects: 127 objects, 218 MB\n      usage:   46820 MB used, 186 GB / 232 GB avail\n      pgs:     93 active+clean\n\n\n``Ceph ConfigMaps``\n\n.. code-block:: console\n\n  ubuntu@node1:~$ kubectl get cm -n ceph\n  NAME                                      DATA      AGE\n  ceph-client-bin                           7         25m\n  ceph-client-etc                           1         25m\n  ceph-etc                                  1         23m\n  ceph-mon-bin                              10        29m\n  ceph-mon-etc                              1         29m\n  ceph-osd-bin                              7         27m\n  ceph-osd-default                          1         27m\n  ceph-osd-etc                              1         27m\n  ceph-provisioners-ceph-provisioners-bin   4         23m\n  ceph-templates                            6         29m\n  ingress-bin                               2         30m\n  ingress-ceph-nginx                        0         30m\n  ingress-conf                              3         30m\n  ingress-services-tcp                      0         30m\n  ingress-services-udp                      0         30m\n\n\n``ceph-mon-etc (ceph.conf)``\n\n.. code-block:: console\n\n  ubuntu@node1:~$ kubectl get cm -n ceph ceph-mon-etc -o yaml\n\n.. 
code-block:: yaml\n\n  apiVersion: v1\n  data:\n    ceph.conf: |\n      [global]\n      cephx = true\n      cephx_cluster_require_signatures = true\n      cephx_require_signatures = false\n      cephx_service_require_signatures = false\n      fsid = 3e53e3b7-e5d9-4bab-9701-134687f4954e\n      mon_addr = :6789\n      mon_host = ceph-mon-discovery.ceph.svc.cluster.local:6789\n      [osd]\n      cluster_network = 10.0.0.0/24\n      ms_bind_port_max = 7100\n      ms_bind_port_min = 6800\n      osd_max_object_name_len = 256\n      osd_mkfs_options_xfs = -f -i size=2048\n      osd_mkfs_type = xfs\n      public_network = 10.0.0.0/24\n  kind: ConfigMap\n  metadata:\n    creationTimestamp: 2018-08-27T04:55:32Z\n    name: ceph-mon-etc\n    namespace: ceph\n    resourceVersion: \"3218\"\n    selfLink: /api/v1/namespaces/ceph/configmaps/ceph-mon-etc\n    uid: 6d9fdcba-a9b5-11e8-bb1d-fa163ec12213\n\n.. note::\n  Note that mon_addr and mon_host have default mon port 6789.\n\n``k8s storageclass``\n\n.. code-block:: console\n\n  ubuntu@node1:~$ kubectl get storageclasses\n  NAME      PROVISIONER    AGE\n  general   ceph.com/rbd   14m\n\n``Ceph services``\n\n.. code-block:: console\n\n  ubuntu@node1:~$ kubectl get svc -n ceph\n  NAME                  TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)             AGE\n  ceph-mgr              ClusterIP   10.111.185.73    <none>        7000/TCP,9283/TCP   27m\n  ceph-mon              ClusterIP   None             <none>        6789/TCP            31m\n  ceph-mon-discovery    ClusterIP   None             <none>        6789/TCP            31m\n  ingress               ClusterIP   10.100.23.32     <none>        80/TCP,443/TCP      32m\n  ingress-error-pages   ClusterIP   None             <none>        80/TCP              32m\n  ingress-exporter      ClusterIP   10.109.196.155   <none>        10254/TCP           32m\n\n``Ceph endpoints``\n\n.. 
code-block:: console\n\n  ubuntu@node1:~$ kubectl get endpoints -n ceph\n  NAME                  ENDPOINTS                                                    AGE\n  ceph-mgr              10.0.0.12:9283,10.0.0.9:9283,10.0.0.12:7000 + 1 more...      27m\n  ceph-mon              10.0.0.12:6789,10.0.0.16:6789,10.0.0.9:6789                  31m\n  ceph-mon-discovery    10.0.0.12:6789,10.0.0.16:6789,10.0.0.9:6789                  31m\n  ingress               192.168.2.6:80,192.168.5.12:80,192.168.2.6:443 + 1 more...   32m\n  ingress-error-pages   192.168.2.5:8080,192.168.5.7:8080                            32m\n  ingress-exporter      192.168.2.6:10254,192.168.5.12:10254                         32m\n\n``netstat ceph mon port``\n\n.. code-block:: console\n\n  ubuntu@node1: netstat -ntlp | grep 6789\n  (Not all processes could be identified, non-owned process info\n   will not be shown, you would have to be root to see it all.)\n  tcp        0      0 10.0.0.16:6789          0.0.0.0:*               LISTEN      -\n\n  ubuntu@node1: netstat -ntlp | grep 6790\n  (Not all processes could be identified, non-owned process info\n   will not be shown, you would have to be root to see it all.)\n\n``Ceph secrets``\n\n.. 
code-block:: console\n\n  ubuntu@node1:~$ kubectl get secrets -n ceph\n  NAME                                                 TYPE                                  DATA      AGE\n  ceph-bootstrap-mds-keyring                           Opaque                                1         34m\n  ceph-bootstrap-mgr-keyring                           Opaque                                1         34m\n  ceph-bootstrap-osd-keyring                           Opaque                                1         34m\n  ceph-bootstrap-rgw-keyring                           Opaque                                1         34m\n  ceph-bootstrap-token-w2sqp                           kubernetes.io/service-account-token   3         34m\n  ceph-client-admin-keyring                            Opaque                                1         34m\n  ceph-mds-keyring-generator-token-s9kst               kubernetes.io/service-account-token   3         34m\n  ceph-mgr-keyring-generator-token-h5sw6               kubernetes.io/service-account-token   3         34m\n  ceph-mgr-token-hr88m                                 kubernetes.io/service-account-token   3         30m\n  ceph-mon-check-token-bfvgk                           kubernetes.io/service-account-token   3         34m\n  ceph-mon-keyring                                     Opaque                                1         34m\n  ceph-mon-keyring-generator-token-5gs5q               kubernetes.io/service-account-token   3         34m\n  ceph-mon-token-zsd6w                                 kubernetes.io/service-account-token   3         34m\n  ceph-osd-keyring-generator-token-h97wb               kubernetes.io/service-account-token   3         34m\n  ceph-osd-token-4wfm5                                 kubernetes.io/service-account-token   3         32m\n  ceph-provisioners-ceph-rbd-provisioner-token-f92tw   kubernetes.io/service-account-token   3         28m\n  ceph-rbd-pool-token-p2nxt                            kubernetes.io/service-account-token   
3         30m\n  ceph-rgw-keyring-generator-token-wmfx6               kubernetes.io/service-account-token   3         34m\n  ceph-storage-keys-generator-token-dq5ts              kubernetes.io/service-account-token   3         34m\n  default-token-j8h48                                  kubernetes.io/service-account-token   3         35m\n  ingress-ceph-ingress-token-68rws                     kubernetes.io/service-account-token   3         35m\n  ingress-error-pages-token-mpvhm                      kubernetes.io/service-account-token   3         35m\n  pvc-ceph-conf-combined-storageclass                  kubernetes.io/rbd                     1         34m\n\n``Openstack secrets``\n\n.. code-block:: console\n\n  ubuntu@node1:~$ kubectl get secrets -n openstack\n  NAME                                                      TYPE                                  DATA      AGE\n  ceph-openstack-config-ceph-ns-key-cleaner-token-jj7n6     kubernetes.io/service-account-token   3         17m\n  ceph-openstack-config-ceph-ns-key-generator-token-5sqfw   kubernetes.io/service-account-token   3         17m\n  default-token-r5knr                                       kubernetes.io/service-account-token   3         35m\n  ingress-error-pages-token-xxjxt                           kubernetes.io/service-account-token   3         35m\n  ingress-openstack-ingress-token-hrvv8                     kubernetes.io/service-account-token   3         35m\n  keystone-api-token-xwczg                                  kubernetes.io/service-account-token   3         12m\n  keystone-bootstrap-token-dhnb6                            kubernetes.io/service-account-token   3         12m\n  keystone-credential-keys                                  Opaque                                2         12m\n  keystone-credential-rotate-token-68lnk                    kubernetes.io/service-account-token   3         12m\n  keystone-credential-setup-token-b2smc                     kubernetes.io/service-account-token   3  
       12m\n  keystone-db-admin                                         Opaque                                1         12m\n  keystone-db-init-token-brzkj                              kubernetes.io/service-account-token   3         12m\n  keystone-db-sync-token-xzqj9                              kubernetes.io/service-account-token   3         12m\n  keystone-db-user                                          Opaque                                1         12m\n  keystone-domain-manage-token-48gn5                        kubernetes.io/service-account-token   3         12m\n  keystone-etc                                              Opaque                                9         12m\n  keystone-fernet-keys                                      Opaque                                2         12m\n  keystone-fernet-rotate-token-djtzb                        kubernetes.io/service-account-token   3         12m\n  keystone-fernet-setup-token-n9st2                         kubernetes.io/service-account-token   3         12m\n  keystone-keystone-admin                                   Opaque                                8         12m\n  keystone-keystone-test                                    Opaque                                8         12m\n  keystone-rabbit-init-token-pt5b2                          kubernetes.io/service-account-token   3         12m\n  keystone-rabbitmq-admin                                   Opaque                                1         12m\n  keystone-rabbitmq-user                                    Opaque                                1         12m\n  keystone-test-token-z8mb6                                 kubernetes.io/service-account-token   3         12m\n  mariadb-db-root-password                                  Opaque                                1         17m\n  mariadb-ingress-error-pages-token-cnrqp                   kubernetes.io/service-account-token   3         17m\n  mariadb-ingress-token-gfrg4                               
kubernetes.io/service-account-token   3         17m\n  mariadb-secrets                                           Opaque                                1         17m\n  mariadb-token-pr5lp                                       kubernetes.io/service-account-token   3         17m\n  memcached-memcached-token-gq96p                           kubernetes.io/service-account-token   3         13m\n  pvc-ceph-client-key                                       kubernetes.io/rbd                     1         17m\n  rabbitmq-rabbitmq-token-5bj85                             kubernetes.io/service-account-token   3         14m\n  rabbitmq-test-token-w4clj                                 kubernetes.io/service-account-token   3         14m\n\n``Openstack PV list``\n\n.. code-block:: console\n\n  ubuntu@node1:~$ kubectl get pv -n openstack\n  NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS    CLAIM                                         STORAGECLASS   REASON    AGE\n  pvc-348f4c52-a9b8-11e8-bb1d-fa163ec12213   256Mi      RWO            Delete           Bound     openstack/rabbitmq-data-rabbitmq-rabbitmq-0   general                  15m\n  pvc-4418c745-a9b8-11e8-bb1d-fa163ec12213   256Mi      RWO            Delete           Bound     openstack/rabbitmq-data-rabbitmq-rabbitmq-1   general                  14m\n  pvc-524d4213-a9b8-11e8-bb1d-fa163ec12213   256Mi      RWO            Delete           Bound     openstack/rabbitmq-data-rabbitmq-rabbitmq-2   general                  14m\n  pvc-da9c9dd2-a9b7-11e8-bb1d-fa163ec12213   5Gi        RWO            Delete           Bound     openstack/mysql-data-mariadb-server-0         general                  17m\n\n``Openstack endpoints``\n\n.. 
code-block:: console\n\n  ubuntu@node1:~$ openstack endpoint list\n  +----------------------------------+-----------+--------------+--------------+---------+-----------+---------------------------------------------------------+\n  | ID                               | Region    | Service Name | Service Type | Enabled | Interface | URL                                                     |\n  +----------------------------------+-----------+--------------+--------------+---------+-----------+---------------------------------------------------------+\n  | 480cc7360752498e822cbbc7211d213a | RegionOne | keystone     | identity     | True    | internal  | http://keystone-api.openstack.svc.cluster.local:5000/v3 |\n  | 8dfe4e4725b84e51a5eda564dee0960c | RegionOne | keystone     | identity     | True    | public    | http://keystone.openstack.svc.cluster.local:80/v3       |\n  | 9b3526e36307400b9accfc7cc834cf99 | RegionOne | keystone     | identity     | True    | admin     | http://keystone.openstack.svc.cluster.local:80/v3       |\n  +----------------------------------+-----------+--------------+--------------+---------+-----------+---------------------------------------------------------+\n\n``Openstack services``\n\n.. code-block:: console\n\n  ubuntu@node1:~$ openstack service list\n  +----------------------------------+----------+----------+\n  | ID                               | Name     | Type     |\n  +----------------------------------+----------+----------+\n  | 67cc6b945e934246b25d31a9374a64af | keystone | identity |\n  +----------------------------------+----------+----------+\n\n\n\n5) Deploy Ceph for Tenant:\n--------------------------\n\nScript to update and execute: ``030-tenant-ceph.sh``\n\nMake following changes to script:\n1 Replace occurrence of ``ceph-fs-uuid.txt`` with ``tenant-ceph-fs-uuid.txt``\n\n2 Replace occurrence of ``ceph.yaml`` with ``tenant-ceph.yaml``\n\n3 For tenant Ceph, no need to deploy ceph-provisioners. 
Update script\nto ``for CHART in ceph-mon ceph-osd ceph-client; do``\n\n\nUpdate script's override section with following:\n\n\n.. code-block:: yaml\n\n  endpoints:\n    identity:\n      namespace: openstack\n    object_store:\n      namespace: openstack\n    ceph_mon:\n      namespace: tenant-ceph\n      port:\n        mon:\n          default: 6790\n    ceph_mgr:\n      namespace: tenant-ceph\n      port:\n        mgr:\n          default: 7001\n        metrics:\n          default: 9284\n  network:\n    public: ${CEPH_PUBLIC_NETWORK}\n    cluster: ${CEPH_CLUSTER_NETWORK}\n  deployment:\n    storage_secrets: true\n    ceph: true\n    rbd_provisioner: false\n    csi_rbd_provisioner: false\n    cephfs_provisioner: false\n    client_secrets: false\n  labels:\n    mon:\n      node_selector_key: ceph-mon-tenant\n    osd:\n      node_selector_key: ceph-osd-tenant\n    rgw:\n      node_selector_key: ceph-rgw-tenant\n    mgr:\n      node_selector_key: ceph-mgr-tenant\n    job:\n      node_selector_key: tenant-ceph-control-plane\n  storageclass:\n    rbd:\n      ceph_configmap_name: tenant-ceph-etc\n      provision_storage_class: false\n      name: tenant-rbd\n      admin_secret_name: pvc-tenant-ceph-conf-combined-storageclass\n      admin_secret_namespace: tenant-ceph\n      user_secret_name: pvc-tenant-ceph-client-key\n    cephfs:\n      provision_storage_class: false\n      name: cephfs\n      user_secret_name: pvc-tenant-ceph-cephfs-client-key\n      admin_secret_name: pvc-tenant-ceph-conf-combined-storageclass\n      admin_secret_namespace: tenant-ceph\n  bootstrap:\n    enabled: true\n  manifests:\n    deployment_mds: false\n  ceph_mgr_modules_config:\n    prometheus:\n      server_port: 9284\n  monitoring:\n    prometheus:\n      enabled: true\n      ceph_mgr:\n        port: 9284\n  conf:\n    ceph:\n      global:\n        fsid: ${CEPH_FS_ID}\n    rgw_ks:\n      enabled: true\n    pool:\n      crush:\n        tunables: ${CRUSH_TUNABLES}\n      target:\n        osd: 
3\n        pg_per_osd: 100\n    storage:\n      osd:\n        - data:\n            type: directory\n            location: /var/lib/openstack-helm/tenant-ceph/osd/osd-one\n          journal:\n            type: directory\n            location: /var/lib/openstack-helm/tenant-ceph/osd/journal-one\n      mon:\n        directory: /var/lib/openstack-helm/tenant-ceph/mon\n\n\n.. note::\n  - Port numbers for Ceph_Mon and Ceph_Mgr are different from default.\n  - We are disabling rbd and cephfs provisioners.\n  - Labels for mon, osd, rgw, mgr and job have been updated for tenant Ceph.\n  - Under storageclass section, values for following have been updated:\n    ceph_configmap_name, admin_secret_name, admin_secret_namespace, user_secret_name\n  - Under storage: mon directory has been updated.\n\nFor Tenant Ceph, we will not be provisioning storage classes; therefore, update\nscript to not install ceph-provisioners chart as following.\n\n``for CHART in ceph-mon ceph-osd ceph-client; do``\n\nExecute script.\n\n6) Enable Openstack namespace to use Tenant Ceph:\n-------------------------------------------------\n\nScript to update and execute: ``040-tenant-ceph-ns-activate.sh``\n\nUpdate script as following:\n\n.. 
code-block:: console\n\n  ...\n  tee /tmp/tenant-ceph-openstack-config.yaml <<EOF\n  endpoints:\n    identity:\n      namespace: openstack\n    object_store:\n      namespace: openstack\n    ceph_mon:\n      namespace: tenant-ceph\n      port:\n        mon:\n          default: 6790\n  network:\n    public: ${CEPH_PUBLIC_NETWORK}\n    cluster: ${CEPH_CLUSTER_NETWORK}\n  deployment:\n    storage_secrets: false\n    ceph: false\n    rbd_provisioner: false\n    csi_rbd_provisioner: false\n    cephfs_provisioner: false\n    client_secrets: true\n  bootstrap:\n    enabled: false\n  conf:\n    rgw_ks:\n      enabled: true\n  storageclass:\n    rbd:\n      ceph_configmap_name: tenant-ceph-etc\n      provision_storage_class: false\n      name: tenant-rbd\n      admin_secret_name: pvc-tenant-ceph-conf-combined-storageclass\n      admin_secret_namespace: tenant-ceph\n      user_secret_name: pvc-tenant-ceph-client-key\n    cephfs:\n      provision_storage_class: false\n      name: cephfs\n      admin_secret_name: pvc-tenant-ceph-conf-combined-storageclass\n      admin_secret_namespace: tenant-ceph\n      user_secret_name: pvc-tenant-ceph-cephfs-client-key\n  EOF\n  helm upgrade --install tenant-ceph-openstack-config ./ceph-provisioners \\\n    --namespace=openstack \\\n    --values=/tmp/tenant-ceph-openstack-config.yaml \\\n    ${OSH_EXTRA_HELM_ARGS} \\\n    ${OSH_EXTRA_HELM_ARGS_CEPH_NS_ACTIVATE}\n\n  #NOTE: Wait for deploy\n  ./tools/deployment/common/wait-for-pods.sh openstack\n\n  #NOTE: Validate Deployment info\n  helm status tenant-ceph-openstack-config\n\nExecute script.\n\n7) Tenant Ceph: Deploy Rados Gateway:\n-------------------------------------\n\nScript to update: ``090-tenant-ceph-radosgateway.sh``\n\nUpdate script with following overrides:\n\n.. 
code-block:: console\n\n  tee /tmp/tenant-radosgw-openstack.yaml <<EOF\n  endpoints:\n    identity:\n      namespace: openstack\n    object_store:\n      namespace: openstack\n    ceph_mon:\n      namespace: tenant-ceph\n      port:\n        mon:\n          default: 6790\n  network:\n    public: ${CEPH_PUBLIC_NETWORK}\n    cluster: ${CEPH_CLUSTER_NETWORK}\n  deployment:\n    storage_secrets: false\n    ceph: true\n    rbd_provisioner: false\n    csi_rbd_provisioner: false\n    cephfs_provisioner: false\n    client_secrets: false\n  bootstrap:\n    enabled: false\n  conf:\n    rgw_ks:\n      enabled: true\n  secrets:\n    keyrings:\n      admin: pvc-tenant-ceph-client-key\n      rgw: os-ceph-bootstrap-rgw-keyring\n    identity:\n      admin: ceph-keystone-admin\n      swift: ceph-keystone-user\n      user_rgw: ceph-keystone-user-rgw\n  ceph_client:\n    configmap: tenant-ceph-etc\n  EOF\n  helm upgrade --install tenant-radosgw-openstack ./ceph-rgw \\\n    --namespace=openstack \\\n    --values=/tmp/tenant-radosgw-openstack.yaml \\\n    ${OSH_EXTRA_HELM_ARGS} \\\n    ${OSH_EXTRA_HELM_ARGS_HEAT}\n\n  #NOTE: Wait for deploy\n  ./tools/deployment/common/wait-for-pods.sh openstack\n\n  #NOTE: Validate Deployment info\n  helm status tenant-radosgw-openstack\n\n\nExecute script\n\n.. code-block:: console\n\n  + openstack service list\n  +----------------------------------+----------+--------------+\n  | ID                               | Name     | Type         |\n  +----------------------------------+----------+--------------+\n  | 0eddeb6af4fd43ea8f73f63a1ae01438 | swift    | object-store |\n  | 67cc6b945e934246b25d31a9374a64af | keystone | identity     |\n  +----------------------------------+----------+--------------+\n\n.. 
code-block:: console\n\n  ubuntu@node1: openstack endpoint list\n  +----------------------------------+-----------+--------------+--------------+---------+-----------+-----------------------------------------------------------------------------+\n  | ID                               | Region    | Service Name | Service Type | Enabled | Interface | URL                                                                         |\n  +----------------------------------+-----------+--------------+--------------+---------+-----------+-----------------------------------------------------------------------------+\n  | 265212a5856e4a0aba8eb294508279c7 | RegionOne | swift        | object-store | True    | admin     | http://ceph-rgw.openstack.svc.cluster.local:8088/swift/v1/KEY_$(tenant_id)s |\n  | 430174e280444598b676d503c5ed9799 | RegionOne | swift        | object-store | True    | internal  | http://ceph-rgw.openstack.svc.cluster.local:8088/swift/v1/KEY_$(tenant_id)s |\n  | 480cc7360752498e822cbbc7211d213a | RegionOne | keystone     | identity     | True    | internal  | http://keystone-api.openstack.svc.cluster.local:5000/v3                     |\n  | 8dfe4e4725b84e51a5eda564dee0960c | RegionOne | keystone     | identity     | True    | public    | http://keystone.openstack.svc.cluster.local:80/v3                           |\n  | 948552a0d90940f7944f8c2eba7ef462 | RegionOne | swift        | object-store | True    | public    | http://radosgw.openstack.svc.cluster.local:80/swift/v1/KEY_$(tenant_id)s    |\n  | 9b3526e36307400b9accfc7cc834cf99 | RegionOne | keystone     | identity     | True    | admin     | http://keystone.openstack.svc.cluster.local:80/v3                           |\n  +----------------------------------+-----------+--------------+--------------+---------+-----------+-----------------------------------------------------------------------------+\n\nResults from Step 5, 6, 7:\n--------------------------\n\n``Storage on node1, node2, node3:``\n\n.. 
code-block:: console\n\n  ubuntu@node1:~$ ls -l /var/lib/openstack-helm/\n  total 8\n  drwxr-xr-x 4 root root 4096 Aug 27 04:57 ceph\n  drwxr-xr-x 3 root root 4096 Aug 27 05:47 tenant-ceph\n\n``Storage on node4, node5, node6:``\n\n.. code-block:: console\n\n  ubuntu@node6:~$ ls -l /var/lib/openstack-helm/\n  total 4\n  drwxr-xr-x 3 root root 4096 Aug 27 05:49 tenant-ceph\n\n``Ceph Status``\n\n.. code-block:: console\n\n  ubuntu@node1: kubectl exec -n tenant-ceph ceph-mon-2g6km -- ceph -s\n    cluster:\n      id:     38339a5a-d976-49dd-88a0-2ac092c271c7\n      health: HEALTH_OK\n\n    services:\n      mon: 3 daemons, quorum node3,node2,node1\n      mgr: node2(active), standbys: node1\n      osd: 3 osds: 3 up, 3 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   18 pools, 93 pgs\n      objects: 193 objects, 37421 bytes\n      usage:   33394 MB used, 199 GB / 232 GB avail\n      pgs:     93 active+clean\n\n\n.. code-block:: console\n\n  ubuntu@node1: kubectl get cm -n openstack\n  NAME                                                 DATA      AGE\n  ceph-etc                                             1         2h\n  ceph-openstack-config-ceph-prov-bin-clients          2         2h\n  ceph-rgw-bin                                         5         3m\n  ceph-rgw-bin-ks                                      3         3m\n  ceph-rgw-etc                                         1         3m\n  tenant-ceph-etc                                      1         1h\n  tenant-ceph-openstack-config-ceph-prov-bin-clients   2         1h\n  tenant-radosgw-openstack-ceph-templates              1         3m\n  ...\n\n.. code-block:: console\n\n  ubuntu@node1: kubectl get cm -n openstack ceph-rgw-etc -o yaml\n\n.. 
code-block:: yaml\n\n  apiVersion: v1\n  data:\n    ceph.conf: |\n      [global]\n      cephx = true\n      cephx_cluster_require_signatures = true\n      cephx_require_signatures = false\n      cephx_service_require_signatures = false\n      mon_addr = :6790\n      mon_host = ceph-mon.tenant-ceph.svc.cluster.local:6790\n      [osd]\n      cluster_network = 10.0.0.0/24\n      ms_bind_port_max = 7100\n      ms_bind_port_min = 6800\n      osd_max_object_name_len = 256\n      osd_mkfs_options_xfs = -f -i size=2048\n      osd_mkfs_type = xfs\n      public_network = 10.0.0.0/24\n  kind: ConfigMap\n  metadata:\n    creationTimestamp: 2018-08-27T07:47:59Z\n    name: ceph-rgw-etc\n    namespace: openstack\n    resourceVersion: \"30058\"\n    selfLink: /api/v1/namespaces/openstack/configmaps/ceph-rgw-etc\n    uid: 848df05c-a9cd-11e8-bb1d-fa163ec12213\n\n.. note::\n  mon_addr and mon_host have non default mon port 6790.\n\n.. code-block:: console\n\n  ubuntu@node1: kubectl get secrets -n openstack\n  NAME                                                             TYPE                                  DATA      AGE\n  ceph-keystone-admin                                              Opaque                                8         4m\n  ceph-keystone-user                                               Opaque                                8         4m\n  ceph-keystone-user-rgw                                           Opaque                                8         4m\n  ceph-ks-endpoints-token-crnrr                                    kubernetes.io/service-account-token   3         4m\n  ceph-ks-service-token-9bnr8                                      kubernetes.io/service-account-token   3         4m\n  ceph-openstack-config-ceph-ns-key-cleaner-token-jj7n6            kubernetes.io/service-account-token   3         2h\n  ceph-openstack-config-ceph-ns-key-generator-token-5sqfw          kubernetes.io/service-account-token   3         2h\n  ceph-rgw-storage-init-token-mhqdw        
                        kubernetes.io/service-account-token   3         4m\n  ceph-rgw-token-9s6nd                                             kubernetes.io/service-account-token   3         4m\n  os-ceph-bootstrap-rgw-keyring                                    Opaque                                1         36m\n  pvc-ceph-client-key                                              kubernetes.io/rbd                     1         2h\n  pvc-tenant-ceph-client-key                                       kubernetes.io/rbd                     1         1h\n  swift-ks-user-token-9slvc                                        kubernetes.io/service-account-token   3         4m\n  tenant-ceph-openstack-config-ceph-ns-key-cleaner-token-r6v9v     kubernetes.io/service-account-token   3         1h\n  tenant-ceph-openstack-config-ceph-ns-key-generator-token-dt472   kubernetes.io/service-account-token   3         1h\n  ...\n\n.. code-block:: console\n\n  ubuntu@node1: kubectl get svc -n tenant-ceph\n  NAME                  TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)             AGE\n  ceph-mgr              ClusterIP   10.107.183.4     <none>        7001/TCP,9284/TCP   2h\n  ceph-mon              ClusterIP   None             <none>        6790/TCP            2h\n  ceph-mon-discovery    ClusterIP   None             <none>        6790/TCP            2h\n  ingress               ClusterIP   10.109.105.140   <none>        80/TCP,443/TCP      3h\n  ingress-error-pages   ClusterIP   None             <none>        80/TCP              3h\n  ingress-exporter      ClusterIP   10.102.110.153   <none>        10254/TCP           3h\n\n.. code-block:: console\n\n  ubuntu@node1: kubectl get endpoints -n tenant-ceph\n  NAME                  ENDPOINTS                                                    AGE\n  ceph-mgr              10.0.0.12:9284,10.0.0.16:9284,10.0.0.12:7001 + 1 more...     
2h\n  ceph-mon              10.0.0.12:6790,10.0.0.16:6790,10.0.0.9:6790                  2h\n  ceph-mon-discovery    10.0.0.12:6790,10.0.0.16:6790,10.0.0.9:6790                  2h\n  ingress               192.168.2.7:80,192.168.5.14:80,192.168.2.7:443 + 1 more...   3h\n  ingress-error-pages   192.168.2.8:8080,192.168.5.13:8080                           3h\n  ingress-exporter      192.168.2.7:10254,192.168.5.14:10254                         3h\n\n.. code-block:: console\n\n  ubuntu@node1: kubectl get endpoints -n openstack\n  NAME                          ENDPOINTS                                                               AGE\n  ceph-rgw                      192.168.2.42:8088,192.168.5.44:8088                                     20m\n  ingress                       192.168.2.4:80,192.168.5.5:80,192.168.2.4:443 + 1 more...               3h\n  ingress-error-pages           192.168.2.3:8080,192.168.5.6:8080                                       3h\n  ingress-exporter              192.168.2.4:10254,192.168.5.5:10254                                     3h\n  keystone                      192.168.2.4:80,192.168.5.5:80,192.168.2.4:443 + 1 more...               2h\n  keystone-api                  192.168.2.21:5000,192.168.5.24:5000                                     2h\n  mariadb                       192.168.2.17:3306                                                       2h\n  mariadb-discovery             192.168.5.22:4567,192.168.5.22:3306                                     2h\n  mariadb-ingress-error-pages   192.168.5.20:8080                                                       2h\n  mariadb-server                192.168.5.22:3306                                                       2h\n  memcached                     192.168.2.19:11211                                                      2h\n  rabbitmq                      192.168.0.196:15672,192.168.2.18:15672,192.168.5.23:15672 + 6 more...   
2h\n  rabbitmq-dsv-7b1733           192.168.0.196:15672,192.168.2.18:15672,192.168.5.23:15672 + 6 more...   2h\n  rabbitmq-mgr-7b1733           192.168.2.4:80,192.168.5.5:80,192.168.2.4:443 + 1 more...               2h\n  radosgw                       192.168.2.4:80,192.168.5.5:80,192.168.2.4:443 + 1 more...               20m\n\n.. code-block:: console\n\n  ubuntu@node1: kubectl get svc -n openstack\n  NAME                          TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                        AGE\n  ceph-rgw                      ClusterIP   10.102.173.130   <none>        8088/TCP                       20m\n  ingress                       ClusterIP   10.102.1.71      <none>        80/TCP,443/TCP                 3h\n  ingress-error-pages           ClusterIP   None             <none>        80/TCP                         3h\n  ingress-exporter              ClusterIP   10.105.29.29     <none>        10254/TCP                      3h\n  keystone                      ClusterIP   10.108.94.108    <none>        80/TCP,443/TCP                 2h\n  keystone-api                  ClusterIP   10.99.50.35      <none>        5000/TCP                       2h\n  mariadb                       ClusterIP   10.111.140.93    <none>        3306/TCP                       2h\n  mariadb-discovery             ClusterIP   None             <none>        3306/TCP,4567/TCP              2h\n  mariadb-ingress-error-pages   ClusterIP   None             <none>        80/TCP                         2h\n  mariadb-server                ClusterIP   10.101.237.241   <none>        3306/TCP                       2h\n  memcached                     ClusterIP   10.111.175.130   <none>        11211/TCP                      2h\n  rabbitmq                      ClusterIP   10.96.78.137     <none>        5672/TCP,25672/TCP,15672/TCP   2h\n  rabbitmq-dsv-7b1733           ClusterIP   None             <none>        5672/TCP,25672/TCP,15672/TCP   2h\n  rabbitmq-mgr-7b1733           ClusterIP   
10.104.105.46    <none>        80/TCP,443/TCP                 2h\n  radosgw                       ClusterIP   10.101.237.167   <none>        80/TCP,443/TCP                 20m\n\n.. code-block:: console\n\n  ubuntu@node1: kubectl get storageclasses\n  NAME      PROVISIONER    AGE\n  general   ceph.com/rbd   1h\n\n\n8) Deploy Glance:\n-----------------\n\nScript to update and execute: ``100-glance.sh``\n\nUpdate script overrides as following:\n\n.. code-block:: yaml\n\n  endpoints:\n    object_store:\n      namespace: tenant-ceph\n    ceph_object_store:\n      namespace: tenant-ceph\n  ceph_client:\n    configmap: tenant-ceph-etc\n    user_secret_name: pvc-tenant-ceph-client-key\n\n.. code-block:: console\n\n    ubuntu@node1: openstack service list\n    +----------------------------------+----------+--------------+\n    | ID                               | Name     | Type         |\n    +----------------------------------+----------+--------------+\n    | 0eddeb6af4fd43ea8f73f63a1ae01438 | swift    | object-store |\n    | 67cc6b945e934246b25d31a9374a64af | keystone | identity     |\n    | 81a61ec8eff74070bb3c2f0118c1bcd5 | glance   | image        |\n    +----------------------------------+----------+--------------+\n\n.. 
code-block:: console\n\n    ubuntu@node1: openstack endpoint list\n    +----------------------------------+-----------+--------------+--------------+---------+-----------+-----------------------------------------------------------------------------+\n    | ID                               | Region    | Service Name | Service Type | Enabled | Interface | URL                                                                         |\n    +----------------------------------+-----------+--------------+--------------+---------+-----------+-----------------------------------------------------------------------------+\n    | 265212a5856e4a0aba8eb294508279c7 | RegionOne | swift        | object-store | True    | admin     | http://ceph-rgw.openstack.svc.cluster.local:8088/swift/v1/KEY_$(tenant_id)s |\n    | 3fd88bc6e4774ff78c94bfa8aaaec3cf | RegionOne | glance       | image        | True    | admin     | http://glance-api.openstack.svc.cluster.local:9292/                         |\n    | 430174e280444598b676d503c5ed9799 | RegionOne | swift        | object-store | True    | internal  | http://ceph-rgw.openstack.svc.cluster.local:8088/swift/v1/KEY_$(tenant_id)s |\n    | 47505d5186ab448e9213f67bc833d2f1 | RegionOne | glance       | image        | True    | public    | http://glance.openstack.svc.cluster.local:80/                               |\n    | 480cc7360752498e822cbbc7211d213a | RegionOne | keystone     | identity     | True    | internal  | http://keystone-api.openstack.svc.cluster.local:5000/v3                     |\n    | 8dfe4e4725b84e51a5eda564dee0960c | RegionOne | keystone     | identity     | True    | public    | http://keystone.openstack.svc.cluster.local:80/v3                           |\n    | 937c2eacce8b4159bf918f4005c2b0ab | RegionOne | glance       | image        | True    | internal  | http://glance-api.openstack.svc.cluster.local:9292/                         |\n    | 948552a0d90940f7944f8c2eba7ef462 | RegionOne | swift        | object-store | True    | 
public    | http://radosgw.openstack.svc.cluster.local:80/swift/v1/KEY_$(tenant_id)s    |\n    | 9b3526e36307400b9accfc7cc834cf99 | RegionOne | keystone     | identity     | True    | admin     | http://keystone.openstack.svc.cluster.local:80/v3                           |\n    +----------------------------------+-----------+--------------+--------------+---------+-----------+-----------------------------------------------------------------------------+\n\n.. note::\n  Above output shows ``http://ceph-rgw.openstack.svc.cluster.local`` which shows\n  that swift is pointing to tenant-ceph.\n\n9) Deploy Cinder:\n-----------------\n\nScript to update and execute: ``110-cinder.sh``\n\nUpdate script overrides as following:\n\n.. code-block:: yaml\n\n  backup:\n    posix:\n      volume:\n        class_name: rbd-tenant\n  ceph_client:\n    configmap: tenant-ceph-etc\n    user_secret_name: pvc-tenant-ceph-client-key\n\n\n.. code-block:: console\n\n    + OS_CLOUD=openstack_helm\n    + openstack service list\n    +----------------------------------+----------+--------------+\n    | ID                               | Name     | Type         |\n    +----------------------------------+----------+--------------+\n    | 0eddeb6af4fd43ea8f73f63a1ae01438 | swift    | object-store |\n    | 66bd0179eada4ab8899a58356fd4d508 | cinder   | volume       |\n    | 67cc6b945e934246b25d31a9374a64af | keystone | identity     |\n    | 81a61ec8eff74070bb3c2f0118c1bcd5 | glance   | image        |\n    | c126046fc5ec4c52acfc8fee0e2f4dda | cinderv2 | volumev2     |\n    | f89b99a31a124b7790e3bb60387380b1 | cinderv3 | volumev3     |\n    +----------------------------------+----------+--------------+\n    + sleep 30\n    + openstack volume type list\n    +--------------------------------------+------+-----------+\n    | ID                                   | Name | Is Public |\n    +--------------------------------------+------+-----------+\n    | d1734540-38e7-4ef8-b74d-36a2c71df8e5 | rbd1 | True     
 |\n    +--------------------------------------+------+-----------+\n    + helm test cinder --timeout 900\n    RUNNING: cinder-test\n    PASSED: cinder-test\n\n.. code-block:: console\n\n  ubuntu@node1: kubectl exec -n tenant-ceph ceph-mon-2g6km -- ceph osd lspools\n  1 rbd,2 cephfs_metadata,3 cephfs_data,4 .rgw.root,5 default.rgw.control,\n  6 default.rgw.data.root,7 default.rgw.gc,8 default.rgw.log,\n  9 default.rgw.intent-log,10 default.rgw.meta,\n  11 default.rgw.usage,12 default.rgw.users.keys,\n  13 default.rgw.users.email,14 default.rgw.users.swift,\n  15 default.rgw.users.uid,16 default.rgw.buckets.extra,\n  17 default.rgw.buckets.index,18 default.rgw.buckets.data,\n  19 cinder.volumes,\n\n.. note::\n  Above output shows that tenant ceph now has 19 pools including one for Cinder.\n\n.. code-block:: console\n\n  ubuntu@node1: kubectl exec -n tenant-ceph ceph-mon-2g6km -- ceph -s\n    cluster:\n      id:     38339a5a-d976-49dd-88a0-2ac092c271c7\n      health: HEALTH_OK\n\n    services:\n      mon: 3 daemons, quorum node3,node2,node1\n      mgr: node2(active), standbys: node1\n      osd: 3 osds: 3 up, 3 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   19 pools, 101 pgs\n      objects: 233 objects, 52644 bytes\n      usage:   33404 MB used, 199 GB / 232 GB avail\n      pgs:     101 active+clean\n\n    io:\n      client:   27544 B/s rd, 0 B/s wr, 26 op/s rd, 17 op/s wr\n"
  },
  {
    "path": "doc/source/specs/values-ordering.rst",
    "content": "====================\nValues File Ordering\n====================\n\nProblem Description\n===================\n\nEach chart's values.yaml file contains various settings such as docker\nimage definition, chart structure setting, form of the resources being\ndistributed, and process configuration.  Currently, the structure of the yaml\nfile is complicated, and finding keys between charts proves difficult due to the\nlack of uniform values organization across charts.\n\nThis specification proposes introducing a uniform values.yaml structure across\nall charts in openstack-helm, openstack-helm-infra, and openstack-helm-addons,\nwith the goal of reducing the complexities of working across multiple charts and\nreducing the effort for creating new charts.\n\nProposed Change\n===============\n\nThis specification proposes defining entries in the values.yaml file into two\ncategories: top-level keys, and their children (sub-level) keys.\n\n* The top-level keys are based on the organizational keys common to all charts\n  in the openstack-helm repositories.  The top-level keys are strictly ordered\n  according to function, which creates a common organization pattern between all\n  charts.\n* All keys under top-level keys are listed in alphabetical order, with the\n  exception of the conf key.  As some configuration files require a strict\n  ordering of their content, excluding this key from any alphabetical\n  organization is required.\n\nThis specification also proposes to restrict the addition of any new top-level\nkeys in charts across all OpenStack-Helm repositories, in order to maintain the\ncommon structure the ordering creates.  The addition of a new top-level key\nshall be agreed upon by the OpenStack-Helm team on a case-by-case basis.  
The\naddition of any new top-level keys should be documented, and this specification\nshall be amended to account for any added keys.\n\nTop-level keys are placed in this order:\n\n* images\n  * sub-keys (alphabetical order)\n* labels\n  * sub-keys (alphabetical order)\n* dependencies\n  * sub-keys (alphabetical order)\n* pod\n  * sub-keys (alphabetical order)\n* secrets\n  * sub-keys (alphabetical order)\n* endpoints\n  * sub-keys (alphabetical order)\n* bootstrap\n  * sub-keys (alphabetical order)\n* network\n  * sub-keys (alphabetical order)\n* manifests\n  * sub-keys (alphabetical order)\n* monitoring\n  * sub-keys (alphabetical order)\n* conf\n  * sub-keys (up-to-chart-developer)\n\nSecurity Impact\n---------------\n\nNo security impact.\n\nPerformance Impact\n------------------\n\nThis feature will not affect the performance of OpenStack-Helm.\n\nAlternatives\n------------\n\nThe alternative is to provide no organization layout for charts across all\nopenstack-helm repositories.\n\nImplementation\n==============\n\nAssignee(s)\n-----------\n\nPrimary assignees:\n  powerds0111 (DaeSeong Kim <daeseong.kim@sk.com>)\n  srwilkers (Steve Wilkerson <sw5822@att.com>)\n\nWork Items\n----------\n\nThe following work items need to be completed for this specification to be\nimplemented.\n\n* Update of developer documentation\n* Add a template highlighting the updated values ordering for use in chart\n  development\n* Change ordering of keys across all charts in openstack-helm,\n  openstack-helm-infra, and openstack-helm-addons\n\nTesting\n=======\n\nTo successfully enforce the ordering defined here, our gates need a method for\nvalidating the ordering and the schema of all values.yaml files.  Without such\na mechanism, the overhead associated with properly reviewing and validating any\nchanges to the structure will be substantial.  
A tool, such as yamllint, would\nprovide this functionality and remove the need to write a custom validation tool\n\nDocumentation Impact\n====================\n\nThe developer documentation in OpenStack-Helm should be updated to guide key\nordering on value files.\n"
  },
  {
    "path": "doc/source/testing/ceph-node-resiliency.rst",
    "content": "==================================================\nCeph - Node Reduction, Expansion and Ceph Recovery\n==================================================\n\nThis document captures steps and result from node reduction and expansion as\nwell as ceph recovery.\n\nTest Scenarios:\n===============\n1) Node reduction: Shutdown 1 of 3 nodes to simulate node failure. Capture effect\nof node failure on Ceph as well as other OpenStack services that are using Ceph.\n\n2) Node expansion: Apply Ceph and OpenStack related labels to another unused k8\nnode. Node expansion should provide more resources for k8 to schedule PODs for\nCeph and OpenStack services.\n\n3) Fix Ceph Cluster: After node expansion, perform maintenance on Ceph cluster\nto ensure quorum is reached and Ceph is HEALTH_OK.\n\nSetup:\n======\n- 6 Nodes (VM based) env\n- Only 3 nodes will have Ceph and OpenStack related labels. Each of these 3\n  nodes will have one MON and one OSD running on them.\n- Followed OSH multinode guide steps to setup nodes and install K8s cluster\n- Followed OSH multinode guide steps to install Ceph and OpenStack charts up to\n  Cinder.\n\nSteps:\n======\n1) Initial Ceph and OpenStack deployment:\nInstall Ceph and OpenStack charts on 3 nodes (mnode1, mnode2 and mnode3).\nCapture Ceph cluster status as well as K8s PODs status.\n\n2) Node reduction (failure):\nShutdown 1 of 3 nodes (mnode3) to test node failure. This should cause\nCeph cluster to go in HEALTH_WARN state as it has lost 1 MON and 1 OSD.\nCapture Ceph cluster status as well as K8s PODs status.\n\n3) Node expansion:\nAdd Ceph and OpenStack related labels to 4th node (mnode4) for expansion.\nCeph cluster would show new MON and OSD being added to cluster. However Ceph\ncluster would continue to show HEALTH_WARN because 1 MON and 1 OSD are still\nmissing.\n\n4) Ceph cluster recovery:\nPerform Ceph maintenance to make Ceph cluster HEALTH_OK. 
Remove lost MON and\nOSD from Ceph cluster.\n\n\nStep 1: Initial Ceph and OpenStack deployment\n=============================================\n\n.. note::\n  Make sure only 3 nodes (mnode1, mnode2, mnode3) have Ceph and OpenStack\n  related labels. K8s would only schedule PODs on these 3 nodes.\n\n``Ceph status:``\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-5qn68 -- ceph -s\n    cluster:\n      id:     54d9af7e-da6d-4980-9075-96bb145db65c\n      health: HEALTH_OK\n\n    services:\n      mon: 3 daemons, quorum mnode1,mnode2,mnode3\n      mgr: mnode2(active), standbys: mnode3\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-6f66956547-c25cx=up:active}, 1 up:standby\n      osd: 3 osds: 3 up, 3 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   19 pools, 101 pgs\n      objects: 354 objects, 260 MB\n      usage:   77807 MB used, 70106 MB / 144 GB avail\n      pgs:     101 active+clean\n\n    io:\n      client:   48769 B/s wr, 0 op/s rd, 12 op/s wr\n\n``Ceph MON Status:``\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-5qn68 -- ceph mon_status -f json-pretty\n\n.. 
code-block:: json\n\n  {\n      \"name\": \"mnode2\",\n      \"rank\": 1,\n      \"state\": \"peon\",\n      \"election_epoch\": 92,\n      \"quorum\": [\n          0,\n          1,\n          2\n      ],\n      \"features\": {\n          \"required_con\": \"153140804152475648\",\n          \"required_mon\": [\n              \"kraken\",\n              \"luminous\"\n          ],\n          \"quorum_con\": \"2305244844532236283\",\n          \"quorum_mon\": [\n              \"kraken\",\n              \"luminous\"\n          ]\n      },\n      \"outside_quorum\": [],\n      \"extra_probe_peers\": [],\n      \"sync_provider\": [],\n      \"monmap\": {\n          \"epoch\": 1,\n          \"fsid\": \"54d9af7e-da6d-4980-9075-96bb145db65c\",\n          \"modified\": \"2018-08-14 21:02:24.330403\",\n          \"created\": \"2018-08-14 21:02:24.330403\",\n          \"features\": {\n              \"persistent\": [\n                  \"kraken\",\n                  \"luminous\"\n              ],\n              \"optional\": []\n          },\n          \"mons\": [\n              {\n                  \"rank\": 0,\n                  \"name\": \"mnode1\",\n                  \"addr\": \"192.168.10.246:6789/0\",\n                  \"public_addr\": \"192.168.10.246:6789/0\"\n              },\n              {\n                  \"rank\": 1,\n                  \"name\": \"mnode2\",\n                  \"addr\": \"192.168.10.247:6789/0\",\n                  \"public_addr\": \"192.168.10.247:6789/0\"\n              },\n              {\n                  \"rank\": 2,\n                  \"name\": \"mnode3\",\n                  \"addr\": \"192.168.10.248:6789/0\",\n                  \"public_addr\": \"192.168.10.248:6789/0\"\n              }\n          ]\n      },\n      \"feature_map\": {\n          \"mon\": {\n              \"group\": {\n                  \"features\": \"0x1ffddff8eea4fffb\",\n                  \"release\": \"luminous\",\n                  \"num\": 1\n              }\n      
    },\n          \"mds\": {\n              \"group\": {\n                  \"features\": \"0x1ffddff8eea4fffb\",\n                  \"release\": \"luminous\",\n                  \"num\": 1\n              }\n          },\n          \"osd\": {\n              \"group\": {\n                  \"features\": \"0x1ffddff8eea4fffb\",\n                  \"release\": \"luminous\",\n                  \"num\": 1\n              }\n          },\n          \"client\": {\n              \"group\": {\n                  \"features\": \"0x7010fb86aa42ada\",\n                  \"release\": \"jewel\",\n                  \"num\": 1\n              },\n              \"group\": {\n                  \"features\": \"0x1ffddff8eea4fffb\",\n                  \"release\": \"luminous\",\n                  \"num\": 1\n              }\n          }\n      }\n  }\n\n\n``Ceph quorum status:``\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-5qn68 -- ceph quorum_status -f json-pretty\n\n.. 
code-block:: json\n\n  {\n      \"election_epoch\": 92,\n      \"quorum\": [\n          0,\n          1,\n          2\n      ],\n      \"quorum_names\": [\n          \"mnode1\",\n          \"mnode2\",\n          \"mnode3\"\n      ],\n      \"quorum_leader_name\": \"mnode1\",\n      \"monmap\": {\n          \"epoch\": 1,\n          \"fsid\": \"54d9af7e-da6d-4980-9075-96bb145db65c\",\n          \"modified\": \"2018-08-14 21:02:24.330403\",\n          \"created\": \"2018-08-14 21:02:24.330403\",\n          \"features\": {\n              \"persistent\": [\n                  \"kraken\",\n                  \"luminous\"\n              ],\n              \"optional\": []\n          },\n          \"mons\": [\n              {\n                  \"rank\": 0,\n                  \"name\": \"mnode1\",\n                  \"addr\": \"192.168.10.246:6789/0\",\n                  \"public_addr\": \"192.168.10.246:6789/0\"\n              },\n              {\n                  \"rank\": 1,\n                  \"name\": \"mnode2\",\n                  \"addr\": \"192.168.10.247:6789/0\",\n                  \"public_addr\": \"192.168.10.247:6789/0\"\n              },\n              {\n                  \"rank\": 2,\n                  \"name\": \"mnode3\",\n                  \"addr\": \"192.168.10.248:6789/0\",\n                  \"public_addr\": \"192.168.10.248:6789/0\"\n              }\n          ]\n      }\n  }\n\n\n``Ceph PODs:``\n\n.. 
code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl get pods -n ceph --show-all=false -o wide\n  NAME                                       READY     STATUS    RESTARTS   AGE       IP               NODE\n  ceph-mds-6f66956547-5x4ng                  1/1       Running   0          1h        192.168.4.14     mnode2\n  ceph-mds-6f66956547-c25cx                  1/1       Running   0          1h        192.168.3.14     mnode3\n  ceph-mgr-5746dd89db-9dbmv                  1/1       Running   0          1h        192.168.10.248   mnode3\n  ceph-mgr-5746dd89db-qq4nl                  1/1       Running   0          1h        192.168.10.247   mnode2\n  ceph-mon-5qn68                             1/1       Running   0          1h        192.168.10.248   mnode3\n  ceph-mon-check-d85994946-4g5xc             1/1       Running   0          1h        192.168.4.8      mnode2\n  ceph-mon-mwkj9                             1/1       Running   0          1h        192.168.10.247   mnode2\n  ceph-mon-ql9zp                             1/1       Running   0          1h        192.168.10.246   mnode1\n  ceph-osd-default-83945928-c7gdd            1/1       Running   0          1h        192.168.10.248   mnode3\n  ceph-osd-default-83945928-s6gs6            1/1       Running   0          1h        192.168.10.246   mnode1\n  ceph-osd-default-83945928-vsc5b            1/1       Running   0          1h        192.168.10.247   mnode2\n  ceph-rbd-provisioner-5bfb577ffd-j6hlx      1/1       Running   0          1h        192.168.4.16     mnode2\n  ceph-rbd-provisioner-5bfb577ffd-zdx2d      1/1       Running   0          1h        192.168.3.16     mnode3\n  ceph-rgw-6c64b444d7-7bgqs                  1/1       Running   0          1h        192.168.3.12     mnode3\n  ceph-rgw-6c64b444d7-hv6vn                  1/1       Running   0          1h        192.168.4.13     mnode2\n  ingress-796d8cf8d6-4txkq                   1/1       Running   0          1h        192.168.2.6      mnode5\n  
ingress-796d8cf8d6-9t7m8                   1/1       Running   0          1h        192.168.5.4      mnode4\n  ingress-error-pages-54454dc79b-hhb4f       1/1       Running   0          1h        192.168.2.5      mnode5\n  ingress-error-pages-54454dc79b-twpgc       1/1       Running   0          1h        192.168.4.4      mnode2\n\n\n``OpenStack PODs:``\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl get pods -n openstack --show-all=false -o wide\n  NAME                                           READY     STATUS    RESTARTS   AGE       IP              NODE\n  cinder-api-66f4f9678-2lgwk                     1/1       Running   0          12m       192.168.3.41    mnode3\n  cinder-api-66f4f9678-flvr5                     1/1       Running   0          12m       192.168.0.202   mnode1\n  cinder-backup-659b68b474-582kr                 1/1       Running   0          12m       192.168.4.39    mnode2\n  cinder-scheduler-6778f6f88c-mm9mt              1/1       Running   0          12m       192.168.0.201   mnode1\n  cinder-volume-79b9bd8bb9-qsxdk                 1/1       Running   0          12m       192.168.4.40    mnode2\n  glance-api-676fd49d4d-j4bdb                    1/1       Running   0          16m       192.168.3.37    mnode3\n  glance-api-676fd49d4d-wtxqt                    1/1       Running   0          16m       192.168.4.31    mnode2\n  ingress-7b4bc84cdd-9fs78                       1/1       Running   0          1h        192.168.5.3     mnode4\n  ingress-7b4bc84cdd-wztz7                       1/1       Running   0          1h        192.168.1.4     mnode6\n  ingress-error-pages-586c7f86d6-2jl5q           1/1       Running   0          1h        192.168.2.4     mnode5\n  ingress-error-pages-586c7f86d6-455j5           1/1       Running   0          1h        192.168.3.3     mnode3\n  keystone-api-5bcc7cb698-dzm8q                  1/1       Running   0          25m       192.168.4.24    mnode2\n  keystone-api-5bcc7cb698-vvwwr           
       1/1       Running   0          25m       192.168.3.25    mnode3\n  mariadb-ingress-84894687fd-dfnkm               1/1       Running   2          1h        192.168.3.20    mnode3\n  mariadb-ingress-error-pages-78fb865f84-p8lpg   1/1       Running   0          1h        192.168.4.17    mnode2\n  mariadb-server-0                               1/1       Running   0          1h        192.168.4.18    mnode2\n  memcached-memcached-5db74ddfd5-wfr9q           1/1       Running   0          29m       192.168.3.23    mnode3\n  rabbitmq-rabbitmq-0                            1/1       Running   0          1h        192.168.3.21    mnode3\n  rabbitmq-rabbitmq-1                            1/1       Running   0          1h        192.168.4.19    mnode2\n  rabbitmq-rabbitmq-2                            1/1       Running   0          1h        192.168.0.195   mnode1\n\n``Result/Observation:``\n\n- Ceph cluster is in HEALTH_OK state with 3 MONs and 3 OSDs.\n- All PODs are in running state.\n\n\nStep 2: Node reduction (failure):\n=================================\n\nShutdown 1 of 3 nodes (mnode1, mnode2, mnode3) to simulate node failure/lost.\n\nIn this test env, let's shutdown ``mnode3`` node.\n\n``Following are PODs scheduled on mnode3 before shutdown:``\n\n.. 
code-block:: console\n\n  ceph                       ceph-mds-6f66956547-c25cx                   0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  ceph                       ceph-mgr-5746dd89db-9dbmv                   0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  ceph                       ceph-mon-5qn68                              0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  ceph                       ceph-osd-default-83945928-c7gdd             0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  ceph                       ceph-rbd-provisioner-5bfb577ffd-zdx2d       0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  ceph                       ceph-rgw-6c64b444d7-7bgqs                   0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  kube-system                ingress-ggckm                               0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  kube-system                kube-flannel-ds-hs29q                       0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  kube-system                kube-proxy-gqpz5                            0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  openstack                  cinder-api-66f4f9678-2lgwk                  0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  openstack                  glance-api-676fd49d4d-j4bdb                 0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  openstack                  ingress-error-pages-586c7f86d6-455j5        0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  openstack                  keystone-api-5bcc7cb698-vvwwr               0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  openstack                  mariadb-ingress-84894687fd-dfnkm            0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  openstack                  memcached-memcached-5db74ddfd5-wfr9q        0 (0%)        0 (0%)      0 (0%)           0 (0%)\n  openstack                  rabbitmq-rabbitmq-0                         0 (0%)        0 (0%)      0 (0%)           
0 (0%)\n\n.. note::\n  In this test env, MariaDB chart is deployed with only 1 replica. In order to\n  test properly, the node with MariaDB server POD (mnode2) should not be shutdown.\n\n.. note::\n  In this test env, each node has Ceph and OpenStack related PODs. Due to this,\n  shutting down a Node will cause issue with Ceph as well as OpenStack services.\n  These PODs level failures are captured following subsequent screenshots.\n\n``Check node status:``\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl get nodes\n  NAME      STATUS     ROLES     AGE       VERSION\n  mnode1    Ready      <none>    1h        v1.10.6\n  mnode2    Ready      <none>    1h        v1.10.6\n  mnode3    NotReady   <none>    1h        v1.10.6\n  mnode4    Ready      <none>    1h        v1.10.6\n  mnode5    Ready      <none>    1h        v1.10.6\n  mnode6    Ready      <none>    1h        v1.10.6\n\n``Ceph status:``\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-ql9zp -- ceph -s\n    cluster:\n      id:     54d9af7e-da6d-4980-9075-96bb145db65c\n      health: HEALTH_WARN\n              insufficient standby MDS daemons available\n              1 osds down\n              1 host (1 osds) down\n              Degraded data redundancy: 354/1062 objects degraded (33.333%), 46 pgs degraded, 101 pgs undersized\n              1/3 mons down, quorum mnode1,mnode2\n\n    services:\n      mon: 3 daemons, quorum mnode1,mnode2, out of quorum: mnode3\n      mgr: mnode2(active)\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-6f66956547-5x4ng=up:active}\n      osd: 3 osds: 2 up, 3 in\n      rgw: 1 daemon active\n\n    data:\n      pools:   19 pools, 101 pgs\n      objects: 354 objects, 260 MB\n      usage:   77845 MB used, 70068 MB / 144 GB avail\n      pgs:     354/1062 objects degraded (33.333%)\n               55 active+undersized\n               46 active+undersized+degraded\n\n``Ceph quorum status:``\n\n.. 
code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-ql9zp -- ceph quorum_status -f json-pretty\n\n.. code-block:: json\n\n  {\n      \"election_epoch\": 96,\n      \"quorum\": [\n          0,\n          1\n      ],\n      \"quorum_names\": [\n          \"mnode1\",\n          \"mnode2\"\n      ],\n      \"quorum_leader_name\": \"mnode1\",\n      \"monmap\": {\n          \"epoch\": 1,\n          \"fsid\": \"54d9af7e-da6d-4980-9075-96bb145db65c\",\n          \"modified\": \"2018-08-14 21:02:24.330403\",\n          \"created\": \"2018-08-14 21:02:24.330403\",\n          \"features\": {\n              \"persistent\": [\n                  \"kraken\",\n                  \"luminous\"\n              ],\n              \"optional\": []\n          },\n          \"mons\": [\n              {\n                  \"rank\": 0,\n                  \"name\": \"mnode1\",\n                  \"addr\": \"192.168.10.246:6789/0\",\n                  \"public_addr\": \"192.168.10.246:6789/0\"\n              },\n              {\n                  \"rank\": 1,\n                  \"name\": \"mnode2\",\n                  \"addr\": \"192.168.10.247:6789/0\",\n                  \"public_addr\": \"192.168.10.247:6789/0\"\n              },\n              {\n                  \"rank\": 2,\n                  \"name\": \"mnode3\",\n                  \"addr\": \"192.168.10.248:6789/0\",\n                  \"public_addr\": \"192.168.10.248:6789/0\"\n              }\n          ]\n      }\n  }\n\n\n``Ceph MON Status:``\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-ql9zp -- ceph mon_status -f json-pretty\n\n.. 
code-block:: json\n\n  {\n      \"name\": \"mnode1\",\n      \"rank\": 0,\n      \"state\": \"leader\",\n      \"election_epoch\": 96,\n      \"quorum\": [\n          0,\n          1\n      ],\n      \"features\": {\n          \"required_con\": \"153140804152475648\",\n          \"required_mon\": [\n              \"kraken\",\n              \"luminous\"\n          ],\n          \"quorum_con\": \"2305244844532236283\",\n          \"quorum_mon\": [\n              \"kraken\",\n              \"luminous\"\n          ]\n      },\n      \"outside_quorum\": [],\n      \"extra_probe_peers\": [],\n      \"sync_provider\": [],\n      \"monmap\": {\n          \"epoch\": 1,\n          \"fsid\": \"54d9af7e-da6d-4980-9075-96bb145db65c\",\n          \"modified\": \"2018-08-14 21:02:24.330403\",\n          \"created\": \"2018-08-14 21:02:24.330403\",\n          \"features\": {\n              \"persistent\": [\n                  \"kraken\",\n                  \"luminous\"\n              ],\n              \"optional\": []\n          },\n          \"mons\": [\n              {\n                  \"rank\": 0,\n                  \"name\": \"mnode1\",\n                  \"addr\": \"192.168.10.246:6789/0\",\n                  \"public_addr\": \"192.168.10.246:6789/0\"\n              },\n              {\n                  \"rank\": 1,\n                  \"name\": \"mnode2\",\n                  \"addr\": \"192.168.10.247:6789/0\",\n                  \"public_addr\": \"192.168.10.247:6789/0\"\n              },\n              {\n                  \"rank\": 2,\n                  \"name\": \"mnode3\",\n                  \"addr\": \"192.168.10.248:6789/0\",\n                  \"public_addr\": \"192.168.10.248:6789/0\"\n              }\n          ]\n      },\n      \"feature_map\": {\n          \"mon\": {\n              \"group\": {\n                  \"features\": \"0x1ffddff8eea4fffb\",\n                  \"release\": \"luminous\",\n                  \"num\": 1\n              }\n          },\n    
      \"mds\": {\n              \"group\": {\n                  \"features\": \"0x1ffddff8eea4fffb\",\n                  \"release\": \"luminous\",\n                  \"num\": 1\n              }\n          },\n          \"osd\": {\n              \"group\": {\n                  \"features\": \"0x1ffddff8eea4fffb\",\n                  \"release\": \"luminous\",\n                  \"num\": 1\n              }\n          },\n          \"client\": {\n              \"group\": {\n                  \"features\": \"0x7010fb86aa42ada\",\n                  \"release\": \"jewel\",\n                  \"num\": 1\n              },\n              \"group\": {\n                  \"features\": \"0x1ffddff8eea4fffb\",\n                  \"release\": \"luminous\",\n                  \"num\": 5\n              }\n          }\n      }\n  }\n\n``Ceph PODs:``\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl get pods -n ceph --show-all=false -o wide\n  NAME                                       READY     STATUS     RESTARTS   AGE       IP               NODE\n  ceph-mds-6f66956547-57tf9                  1/1       Running    0          1m        192.168.0.207    mnode1\n  ceph-mds-6f66956547-5x4ng                  1/1       Running    0          1h        192.168.4.14     mnode2\n  ceph-mds-6f66956547-c25cx                  1/1       Unknown    0          1h        192.168.3.14     mnode3\n  ceph-mgr-5746dd89db-9dbmv                  1/1       Unknown    0          1h        192.168.10.248   mnode3\n  ceph-mgr-5746dd89db-d5fcw                  1/1       Running    0          1m        192.168.10.246   mnode1\n  ceph-mgr-5746dd89db-qq4nl                  1/1       Running    0          1h        192.168.10.247   mnode2\n  ceph-mon-5qn68                             1/1       NodeLost   0          1h        192.168.10.248   mnode3\n  ceph-mon-check-d85994946-4g5xc             1/1       Running    0          1h        192.168.4.8      mnode2\n  ceph-mon-mwkj9              
               1/1       Running    0          1h        192.168.10.247   mnode2\n  ceph-mon-ql9zp                             1/1       Running    0          1h        192.168.10.246   mnode1\n  ceph-osd-default-83945928-c7gdd            1/1       NodeLost   0          1h        192.168.10.248   mnode3\n  ceph-osd-default-83945928-s6gs6            1/1       Running    0          1h        192.168.10.246   mnode1\n  ceph-osd-default-83945928-vsc5b            1/1       Running    0          1h        192.168.10.247   mnode2\n  ceph-rbd-provisioner-5bfb577ffd-j6hlx      1/1       Running    0          1h        192.168.4.16     mnode2\n  ceph-rbd-provisioner-5bfb577ffd-kdmrv      1/1       Running    0          1m        192.168.0.209    mnode1\n  ceph-rbd-provisioner-5bfb577ffd-zdx2d      1/1       Unknown    0          1h        192.168.3.16     mnode3\n  ceph-rgw-6c64b444d7-4qgkw                  1/1       Running    0          1m        192.168.0.210    mnode1\n  ceph-rgw-6c64b444d7-7bgqs                  1/1       Unknown    0          1h        192.168.3.12     mnode3\n  ceph-rgw-6c64b444d7-hv6vn                  1/1       Running    0          1h        192.168.4.13     mnode2\n  ingress-796d8cf8d6-4txkq                   1/1       Running    0          1h        192.168.2.6      mnode5\n  ingress-796d8cf8d6-9t7m8                   1/1       Running    0          1h        192.168.5.4      mnode4\n  ingress-error-pages-54454dc79b-hhb4f       1/1       Running    0          1h        192.168.2.5      mnode5\n  ingress-error-pages-54454dc79b-twpgc       1/1       Running    0          1h        192.168.4.4      mnode2\n\n``OpenStack PODs:``\n\n.. 
code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl get pods -n openstack --show-all=false -o wide\n  NAME                                           READY     STATUS    RESTARTS   AGE       IP              NODE\n  cinder-api-66f4f9678-2lgwk                     1/1       Unknown   0          22m       192.168.3.41    mnode3\n  cinder-api-66f4f9678-flvr5                     1/1       Running   0          22m       192.168.0.202   mnode1\n  cinder-api-66f4f9678-w5xhd                     1/1       Running   0          1m        192.168.4.45    mnode2\n  cinder-backup-659b68b474-582kr                 1/1       Running   0          22m       192.168.4.39    mnode2\n  cinder-scheduler-6778f6f88c-mm9mt              1/1       Running   0          22m       192.168.0.201   mnode1\n  cinder-volume-79b9bd8bb9-qsxdk                 1/1       Running   0          22m       192.168.4.40    mnode2\n  cinder-volume-usage-audit-1534286100-mm8r7     1/1       Running   0          4m        192.168.4.44    mnode2\n  glance-api-676fd49d4d-4tnm6                    1/1       Running   0          1m        192.168.0.212   mnode1\n  glance-api-676fd49d4d-j4bdb                    1/1       Unknown   0          26m       192.168.3.37    mnode3\n  glance-api-676fd49d4d-wtxqt                    1/1       Running   0          26m       192.168.4.31    mnode2\n  ingress-7b4bc84cdd-9fs78                       1/1       Running   0          1h        192.168.5.3     mnode4\n  ingress-7b4bc84cdd-wztz7                       1/1       Running   0          1h        192.168.1.4     mnode6\n  ingress-error-pages-586c7f86d6-2jl5q           1/1       Running   0          1h        192.168.2.4     mnode5\n  ingress-error-pages-586c7f86d6-455j5           1/1       Unknown   0          1h        192.168.3.3     mnode3\n  ingress-error-pages-586c7f86d6-55j4x           1/1       Running   0          1m        192.168.4.47    mnode2\n  keystone-api-5bcc7cb698-dzm8q                  1/1       
Running   0          35m       192.168.4.24    mnode2\n  keystone-api-5bcc7cb698-vvwwr                  1/1       Unknown   0          35m       192.168.3.25    mnode3\n  keystone-api-5bcc7cb698-wx5l6                  1/1       Running   0          1m        192.168.0.213   mnode1\n  mariadb-ingress-84894687fd-9lmpx               1/1       Running   0          1m        192.168.4.48    mnode2\n  mariadb-ingress-84894687fd-dfnkm               1/1       Unknown   2          1h        192.168.3.20    mnode3\n  mariadb-ingress-error-pages-78fb865f84-p8lpg   1/1       Running   0          1h        192.168.4.17    mnode2\n  mariadb-server-0                               1/1       Running   0          1h        192.168.4.18    mnode2\n  memcached-memcached-5db74ddfd5-926ln           1/1       Running   0          1m        192.168.4.49    mnode2\n  memcached-memcached-5db74ddfd5-wfr9q           1/1       Unknown   0          38m       192.168.3.23    mnode3\n  rabbitmq-rabbitmq-0                            1/1       Unknown   0          1h        192.168.3.21    mnode3\n  rabbitmq-rabbitmq-1                            1/1       Running   0          1h        192.168.4.19    mnode2\n  rabbitmq-rabbitmq-2                            1/1       Running   0          1h        192.168.0.195   mnode1\n\n\n``Result/Observation:``\n\n- PODs that were scheduled on mnode3 node has status of NodeLost/Unknown.\n- Ceph status shows HEALTH_WARN as expected\n- Ceph status shows 1 Ceph MON and 1 Ceph OSD missing.\n- OpenStack PODs that were scheduled mnode3 also shows NodeLost/Unknown.\n\nStep 3: Node Expansion\n======================\n\nLet's add more resources for K8s to schedule PODs on.\n\nIn this test env, let's use ``mnode4`` and apply Ceph and OpenStack related\nlabels.\n\n.. 
note::\n  Since the node that was shutdown earlier had both Ceph and OpenStack PODs,\n  mnode4 should get Ceph and OpenStack related labels as well.\n\nAfter applying labels, let's check status\n\n``Ceph status:``\n\n.. code-block:: console\n\n  ubuntu@mnode1:~$ kubectl exec -n ceph ceph-mon-ql9zp -- ceph -s\n    cluster:\n      id:     54d9af7e-da6d-4980-9075-96bb145db65c\n      health: HEALTH_WARN\n              1/4 mons down, quorum mnode1,mnode2,mnode4\n\n    services:\n      mon: 4 daemons, quorum mnode1,mnode2,mnode4, out of quorum: mnode3\n      mgr: mnode2(active), standbys: mnode1\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-6f66956547-5x4ng=up:active}, 1 up:standby\n      osd: 4 osds: 3 up, 3 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   19 pools, 101 pgs\n      objects: 354 objects, 260 MB\n      usage:   74684 MB used, 73229 MB / 144 GB avail\n      pgs:     101 active+clean\n\n\n``Ceph MON Status``\n\n.. code-block:: console\n\n  ubuntu@mnode1:~$ kubectl exec -n ceph ceph-mon-ql9zp -- ceph mon_status -f json-pretty\n\n.. 
code-block:: json\n\n  {\n      \"name\": \"mnode2\",\n      \"rank\": 1,\n      \"state\": \"peon\",\n      \"election_epoch\": 100,\n      \"quorum\": [\n          0,\n          1,\n          3\n      ],\n      \"features\": {\n          \"required_con\": \"153140804152475648\",\n          \"required_mon\": [\n              \"kraken\",\n              \"luminous\"\n          ],\n          \"quorum_con\": \"2305244844532236283\",\n          \"quorum_mon\": [\n              \"kraken\",\n              \"luminous\"\n          ]\n      },\n      \"outside_quorum\": [],\n      \"extra_probe_peers\": [\n          \"192.168.10.249:6789/0\"\n      ],\n      \"sync_provider\": [],\n      \"monmap\": {\n          \"epoch\": 2,\n          \"fsid\": \"54d9af7e-da6d-4980-9075-96bb145db65c\",\n          \"modified\": \"2018-08-14 22:43:31.517568\",\n          \"created\": \"2018-08-14 21:02:24.330403\",\n          \"features\": {\n              \"persistent\": [\n                  \"kraken\",\n                  \"luminous\"\n              ],\n              \"optional\": []\n          },\n          \"mons\": [\n              {\n                  \"rank\": 0,\n                  \"name\": \"mnode1\",\n                  \"addr\": \"192.168.10.246:6789/0\",\n                  \"public_addr\": \"192.168.10.246:6789/0\"\n              },\n              {\n                  \"rank\": 1,\n                  \"name\": \"mnode2\",\n                  \"addr\": \"192.168.10.247:6789/0\",\n                  \"public_addr\": \"192.168.10.247:6789/0\"\n              },\n              {\n                  \"rank\": 2,\n                  \"name\": \"mnode3\",\n                  \"addr\": \"192.168.10.248:6789/0\",\n                  \"public_addr\": \"192.168.10.248:6789/0\"\n              },\n              {\n                  \"rank\": 3,\n                  \"name\": \"mnode4\",\n                  \"addr\": \"192.168.10.249:6789/0\",\n                  \"public_addr\": 
\"192.168.10.249:6789/0\"\n              }\n          ]\n      },\n      \"feature_map\": {\n          \"mon\": {\n              \"group\": {\n                  \"features\": \"0x1ffddff8eea4fffb\",\n                  \"release\": \"luminous\",\n                  \"num\": 1\n              }\n          },\n          \"mds\": {\n              \"group\": {\n                  \"features\": \"0x1ffddff8eea4fffb\",\n                  \"release\": \"luminous\",\n                  \"num\": 1\n              }\n          },\n          \"osd\": {\n              \"group\": {\n                  \"features\": \"0x1ffddff8eea4fffb\",\n                  \"release\": \"luminous\",\n                  \"num\": 2\n              }\n          },\n          \"client\": {\n              \"group\": {\n                  \"features\": \"0x7010fb86aa42ada\",\n                  \"release\": \"jewel\",\n                  \"num\": 1\n              },\n              \"group\": {\n                  \"features\": \"0x1ffddff8eea4fffb\",\n                  \"release\": \"luminous\",\n                  \"num\": 1\n              }\n          }\n      }\n  }\n\n\n``Ceph quorum status:``\n\n.. code-block:: console\n\n  ubuntu@mnode1:~$ kubectl exec -n ceph ceph-mon-ql9zp -- ceph quorum_status -f json-pretty\n\n.. 
code-block:: json\n\n  {\n      \"election_epoch\": 100,\n      \"quorum\": [\n          0,\n          1,\n          3\n      ],\n      \"quorum_names\": [\n          \"mnode1\",\n          \"mnode2\",\n          \"mnode4\"\n      ],\n      \"quorum_leader_name\": \"mnode1\",\n      \"monmap\": {\n          \"epoch\": 2,\n          \"fsid\": \"54d9af7e-da6d-4980-9075-96bb145db65c\",\n          \"modified\": \"2018-08-14 22:43:31.517568\",\n          \"created\": \"2018-08-14 21:02:24.330403\",\n          \"features\": {\n              \"persistent\": [\n                  \"kraken\",\n                  \"luminous\"\n              ],\n              \"optional\": []\n          },\n          \"mons\": [\n              {\n                  \"rank\": 0,\n                  \"name\": \"mnode1\",\n                  \"addr\": \"192.168.10.246:6789/0\",\n                  \"public_addr\": \"192.168.10.246:6789/0\"\n              },\n              {\n                  \"rank\": 1,\n                  \"name\": \"mnode2\",\n                  \"addr\": \"192.168.10.247:6789/0\",\n                  \"public_addr\": \"192.168.10.247:6789/0\"\n              },\n              {\n                  \"rank\": 2,\n                  \"name\": \"mnode3\",\n                  \"addr\": \"192.168.10.248:6789/0\",\n                  \"public_addr\": \"192.168.10.248:6789/0\"\n              },\n              {\n                  \"rank\": 3,\n                  \"name\": \"mnode4\",\n                  \"addr\": \"192.168.10.249:6789/0\",\n                  \"public_addr\": \"192.168.10.249:6789/0\"\n              }\n          ]\n      }\n  }\n\n\n``Ceph PODs:``\n\n.. 
code-block:: console\n\n  ubuntu@mnode1:~$ kubectl get pods -n ceph --show-all=false -o wide\n  Flag --show-all has been deprecated, will be removed in an upcoming release\n  NAME                                       READY     STATUS     RESTARTS   AGE       IP               NODE\n  ceph-mds-6f66956547-57tf9                  1/1       Running    0          10m       192.168.0.207    mnode1\n  ceph-mds-6f66956547-5x4ng                  1/1       Running    0          1h        192.168.4.14     mnode2\n  ceph-mds-6f66956547-c25cx                  1/1       Unknown    0          1h        192.168.3.14     mnode3\n  ceph-mgr-5746dd89db-9dbmv                  1/1       Unknown    0          1h        192.168.10.248   mnode3\n  ceph-mgr-5746dd89db-d5fcw                  1/1       Running    0          10m       192.168.10.246   mnode1\n  ceph-mgr-5746dd89db-qq4nl                  1/1       Running    0          1h        192.168.10.247   mnode2\n  ceph-mon-5krkd                             1/1       Running    0          4m        192.168.10.249   mnode4\n  ceph-mon-5qn68                             1/1       NodeLost   0          1h        192.168.10.248   mnode3\n  ceph-mon-check-d85994946-4g5xc             1/1       Running    0          1h        192.168.4.8      mnode2\n  ceph-mon-mwkj9                             1/1       Running    0          1h        192.168.10.247   mnode2\n  ceph-mon-ql9zp                             1/1       Running    0          1h        192.168.10.246   mnode1\n  ceph-osd-default-83945928-c7gdd            1/1       NodeLost   0          1h        192.168.10.248   mnode3\n  ceph-osd-default-83945928-kf5tj            1/1       Running    0          4m        192.168.10.249   mnode4\n  ceph-osd-default-83945928-s6gs6            1/1       Running    0          1h        192.168.10.246   mnode1\n  ceph-osd-default-83945928-vsc5b            1/1       Running    0          1h        192.168.10.247   mnode2\n  
ceph-rbd-provisioner-5bfb577ffd-j6hlx      1/1       Running    0          1h        192.168.4.16     mnode2\n  ceph-rbd-provisioner-5bfb577ffd-kdmrv      1/1       Running    0          10m       192.168.0.209    mnode1\n  ceph-rbd-provisioner-5bfb577ffd-zdx2d      1/1       Unknown    0          1h        192.168.3.16     mnode3\n  ceph-rgw-6c64b444d7-4qgkw                  1/1       Running    0          10m       192.168.0.210    mnode1\n  ceph-rgw-6c64b444d7-7bgqs                  1/1       Unknown    0          1h        192.168.3.12     mnode3\n  ceph-rgw-6c64b444d7-hv6vn                  1/1       Running    0          1h        192.168.4.13     mnode2\n  ingress-796d8cf8d6-4txkq                   1/1       Running    0          1h        192.168.2.6      mnode5\n  ingress-796d8cf8d6-9t7m8                   1/1       Running    0          1h        192.168.5.4      mnode4\n  ingress-error-pages-54454dc79b-hhb4f       1/1       Running    0          1h        192.168.2.5      mnode5\n  ingress-error-pages-54454dc79b-twpgc       1/1       Running    0          1h        192.168.4.4      mnode2\n\n``OpenStack PODs:``\n\n.. 
code-block:: console\n\n  ubuntu@mnode1:~$ kubectl get pods -n openstack --show-all=false -o wide\n  Flag --show-all has been deprecated, will be removed in an upcoming release\n  NAME                                           READY     STATUS    RESTARTS   AGE       IP              NODE\n  cinder-api-66f4f9678-2lgwk                     1/1       Unknown   0          32m       192.168.3.41    mnode3\n  cinder-api-66f4f9678-flvr5                     1/1       Running   0          32m       192.168.0.202   mnode1\n  cinder-api-66f4f9678-w5xhd                     1/1       Running   0          11m       192.168.4.45    mnode2\n  cinder-backup-659b68b474-582kr                 1/1       Running   0          32m       192.168.4.39    mnode2\n  cinder-scheduler-6778f6f88c-mm9mt              1/1       Running   0          32m       192.168.0.201   mnode1\n  cinder-volume-79b9bd8bb9-qsxdk                 1/1       Running   0          32m       192.168.4.40    mnode2\n  glance-api-676fd49d4d-4tnm6                    1/1       Running   0          11m       192.168.0.212   mnode1\n  glance-api-676fd49d4d-j4bdb                    1/1       Unknown   0          36m       192.168.3.37    mnode3\n  glance-api-676fd49d4d-wtxqt                    1/1       Running   0          36m       192.168.4.31    mnode2\n  ingress-7b4bc84cdd-9fs78                       1/1       Running   0          1h        192.168.5.3     mnode4\n  ingress-7b4bc84cdd-wztz7                       1/1       Running   0          1h        192.168.1.4     mnode6\n  ingress-error-pages-586c7f86d6-2jl5q           1/1       Running   0          1h        192.168.2.4     mnode5\n  ingress-error-pages-586c7f86d6-455j5           1/1       Unknown   0          1h        192.168.3.3     mnode3\n  ingress-error-pages-586c7f86d6-55j4x           1/1       Running   0          11m       192.168.4.47    mnode2\n  keystone-api-5bcc7cb698-dzm8q                  1/1       Running   0          45m       192.168.4.24    
mnode2\n  keystone-api-5bcc7cb698-vvwwr                  1/1       Unknown   0          45m       192.168.3.25    mnode3\n  keystone-api-5bcc7cb698-wx5l6                  1/1       Running   0          11m       192.168.0.213   mnode1\n  mariadb-ingress-84894687fd-9lmpx               1/1       Running   0          11m       192.168.4.48    mnode2\n  mariadb-ingress-84894687fd-dfnkm               1/1       Unknown   2          1h        192.168.3.20    mnode3\n  mariadb-ingress-error-pages-78fb865f84-p8lpg   1/1       Running   0          1h        192.168.4.17    mnode2\n  mariadb-server-0                               1/1       Running   0          1h        192.168.4.18    mnode2\n  memcached-memcached-5db74ddfd5-926ln           1/1       Running   0          11m       192.168.4.49    mnode2\n  memcached-memcached-5db74ddfd5-wfr9q           1/1       Unknown   0          48m       192.168.3.23    mnode3\n  rabbitmq-rabbitmq-0                            1/1       Unknown   0          1h        192.168.3.21    mnode3\n  rabbitmq-rabbitmq-1                            1/1       Running   0          1h        192.168.4.19    mnode2\n  rabbitmq-rabbitmq-2                            1/1       Running   0          1h        192.168.0.195   mnode1\n\n\n``Result/Observation:``\n\n- Ceph MON and OSD PODs got scheduled on mnode4 node.\n- Ceph status shows that MON and OSD count has been increased.\n- Ceph status still shows HEALTH_WARN as one MON and OSD are still down.\n\nStep 4: Ceph cluster recovery\n=============================\n\nNow that we have added new node for Ceph and OpenStack PODs, let's perform\nmaintenance on Ceph cluster.\n\n1) Remove out of quorum MON:\n----------------------------\n\nUsing ``ceph mon_status`` and ``ceph -s`` commands, confirm ID of MON that is out of quorum.\n\nIn this test env, ``mnode3`` is out of quorum.\n\n.. 
note::\n  In this test env, since out of quorum MON is no longer available due to node failure, we can\n  proceed with removing it from Ceph cluster.\n\n``Remove MON from Ceph cluster``\n\n.. code-block:: console\n\n  ubuntu@mnode1:~$ kubectl exec -n ceph ceph-mon-ql9zp -- ceph mon remove mnode3\n  removing mon.mnode3 at 192.168.10.248:6789/0, there will be 3 monitors\n\n``Ceph Status:``\n\n.. code-block:: console\n\n  ubuntu@mnode1:~$ kubectl exec -n ceph ceph-mon-ql9zp -- ceph -s\n    cluster:\n      id:     54d9af7e-da6d-4980-9075-96bb145db65c\n      health: HEALTH_OK\n\n    services:\n      mon: 3 daemons, quorum mnode1,mnode2,mnode4\n      mgr: mnode2(active), standbys: mnode1\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-6f66956547-5x4ng=up:active}, 1 up:standby\n      osd: 4 osds: 3 up, 3 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   19 pools, 101 pgs\n      objects: 354 objects, 260 MB\n      usage:   74705 MB used, 73208 MB / 144 GB avail\n      pgs:     101 active+clean\n\n    io:\n      client:   132 kB/s wr, 0 op/s rd, 23 op/s wr\n\nAs shown above, Ceph status is now HEALTH_OK and shows 3 MONs available.\n\n``Ceph MON Status``\n\n.. code-block:: console\n\n  ubuntu@mnode1:~$ kubectl exec -n ceph ceph-mon-ql9zp -- ceph mon_status -f json-pretty\n\n.. 
code-block:: json\n\n    {\n        \"name\": \"mnode4\",\n        \"rank\": 2,\n        \"state\": \"peon\",\n        \"election_epoch\": 106,\n        \"quorum\": [\n            0,\n            1,\n            2\n        ],\n        \"features\": {\n            \"required_con\": \"153140804152475648\",\n            \"required_mon\": [\n                \"kraken\",\n                \"luminous\"\n            ],\n            \"quorum_con\": \"2305244844532236283\",\n            \"quorum_mon\": [\n                \"kraken\",\n                \"luminous\"\n            ]\n        },\n        \"outside_quorum\": [],\n        \"extra_probe_peers\": [],\n        \"sync_provider\": [],\n        \"monmap\": {\n            \"epoch\": 3,\n            \"fsid\": \"54d9af7e-da6d-4980-9075-96bb145db65c\",\n            \"modified\": \"2018-08-14 22:55:41.256612\",\n            \"created\": \"2018-08-14 21:02:24.330403\",\n            \"features\": {\n                \"persistent\": [\n                    \"kraken\",\n                    \"luminous\"\n                ],\n                \"optional\": []\n            },\n            \"mons\": [\n                {\n                    \"rank\": 0,\n                    \"name\": \"mnode1\",\n                    \"addr\": \"192.168.10.246:6789/0\",\n                    \"public_addr\": \"192.168.10.246:6789/0\"\n                },\n                {\n                    \"rank\": 1,\n                    \"name\": \"mnode2\",\n                    \"addr\": \"192.168.10.247:6789/0\",\n                    \"public_addr\": \"192.168.10.247:6789/0\"\n                },\n                {\n                    \"rank\": 2,\n                    \"name\": \"mnode4\",\n                    \"addr\": \"192.168.10.249:6789/0\",\n                    \"public_addr\": \"192.168.10.249:6789/0\"\n                }\n            ]\n        },\n        \"feature_map\": {\n            \"mon\": {\n                \"group\": {\n                    
\"features\": \"0x1ffddff8eea4fffb\",\n                    \"release\": \"luminous\",\n                    \"num\": 1\n                }\n            },\n            \"client\": {\n                \"group\": {\n                    \"features\": \"0x1ffddff8eea4fffb\",\n                    \"release\": \"luminous\",\n                    \"num\": 1\n                }\n            }\n        }\n    }\n\n``Ceph quorum status``\n\n.. code-block:: console\n\n  ubuntu@mnode1:~$ kubectl exec -n ceph ceph-mon-ql9zp -- ceph quorum_status -f json-pretty\n\n.. code-block:: json\n\n\n    {\n        \"election_epoch\": 106,\n        \"quorum\": [\n            0,\n            1,\n            2\n        ],\n        \"quorum_names\": [\n            \"mnode1\",\n            \"mnode2\",\n            \"mnode4\"\n        ],\n        \"quorum_leader_name\": \"mnode1\",\n        \"monmap\": {\n            \"epoch\": 3,\n            \"fsid\": \"54d9af7e-da6d-4980-9075-96bb145db65c\",\n            \"modified\": \"2018-08-14 22:55:41.256612\",\n            \"created\": \"2018-08-14 21:02:24.330403\",\n            \"features\": {\n                \"persistent\": [\n                    \"kraken\",\n                    \"luminous\"\n                ],\n                \"optional\": []\n            },\n            \"mons\": [\n                {\n                    \"rank\": 0,\n                    \"name\": \"mnode1\",\n                    \"addr\": \"192.168.10.246:6789/0\",\n                    \"public_addr\": \"192.168.10.246:6789/0\"\n                },\n                {\n                    \"rank\": 1,\n                    \"name\": \"mnode2\",\n                    \"addr\": \"192.168.10.247:6789/0\",\n                    \"public_addr\": \"192.168.10.247:6789/0\"\n                },\n                {\n                    \"rank\": 2,\n                    \"name\": \"mnode4\",\n                    \"addr\": \"192.168.10.249:6789/0\",\n                    \"public_addr\": 
\"192.168.10.249:6789/0\"\n                }\n            ]\n        }\n    }\n\n\n2) Remove down OSD from Ceph cluster:\n-------------------------------------\n\nAs shown in Ceph status above, ``osd: 4 osds: 3 up, 3 in`` 1 of 4 OSDs is still\ndown. Let's remove that OSD.\n\nFirst, run ``ceph osd tree`` command to get list of OSDs.\n\n.. code-block:: console\n\n  ubuntu@mnode1:~$ kubectl exec -n ceph ceph-mon-ql9zp -- ceph osd tree\n  ID CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF\n  -1       0.19995 root default\n  -7       0.04999     host mnode1\n   2   hdd 0.04999         osd.2       up  1.00000 1.00000\n  -2       0.04999     host mnode2\n   0   hdd 0.04999         osd.0       up  1.00000 1.00000\n  -3       0.04999     host mnode3\n   1   hdd 0.04999         osd.1     down        0 1.00000\n  -9       0.04999     host mnode4\n   3   hdd 0.04999         osd.3       up  1.00000 1.00000\n\nAbove output shows that ``osd.1`` is down.\n\nRun ``ceph osd purge`` command to remove OSD from ceph cluster.\n\n.. code-block:: console\n\n  ubuntu@mnode1:~$ kubectl exec -n ceph ceph-mon-ql9zp -- ceph osd purge osd.1 --yes-i-really-mean-it\n  purged osd.1\n\n``Ceph status``\n\n.. code-block:: console\n\n  ubuntu@mnode1:~$ kubectl exec -n ceph ceph-mon-ql9zp -- ceph -s\n    cluster:\n      id:     54d9af7e-da6d-4980-9075-96bb145db65c\n      health: HEALTH_OK\n\n    services:\n      mon: 3 daemons, quorum mnode1,mnode2,mnode4\n      mgr: mnode2(active), standbys: mnode1\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-6f66956547-5x4ng=up:active}, 1 up:standby\n      osd: 3 osds: 3 up, 3 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   19 pools, 101 pgs\n      objects: 354 objects, 260 MB\n      usage:   74681 MB used, 73232 MB / 144 GB avail\n      pgs:     101 active+clean\n\n    io:\n      client:   57936 B/s wr, 0 op/s rd, 14 op/s wr\n\nAbove output shows Ceph cluster in HEALTH_OK with all OSDs and MONs up and running.\n\n``Ceph PODs``\n\n.. 
code-block:: console\n\n  ubuntu@mnode1:~$ kubectl get pods -n ceph --show-all=false -o wide\n  Flag --show-all has been deprecated, will be removed in an upcoming release\n  NAME                                       READY     STATUS     RESTARTS   AGE       IP               NODE\n  ceph-mds-6f66956547-57tf9                  1/1       Running    0          25m       192.168.0.207    mnode1\n  ceph-mds-6f66956547-5x4ng                  1/1       Running    0          1h        192.168.4.14     mnode2\n  ceph-mds-6f66956547-c25cx                  1/1       Unknown    0          1h        192.168.3.14     mnode3\n  ceph-mgr-5746dd89db-9dbmv                  1/1       Unknown    0          1h        192.168.10.248   mnode3\n  ceph-mgr-5746dd89db-d5fcw                  1/1       Running    0          25m       192.168.10.246   mnode1\n  ceph-mgr-5746dd89db-qq4nl                  1/1       Running    0          1h        192.168.10.247   mnode2\n  ceph-mon-5krkd                             1/1       Running    0          19m       192.168.10.249   mnode4\n  ceph-mon-5qn68                             1/1       NodeLost   0          2h        192.168.10.248   mnode3\n  ceph-mon-check-d85994946-4g5xc             1/1       Running    0          2h        192.168.4.8      mnode2\n  ceph-mon-mwkj9                             1/1       Running    0          2h        192.168.10.247   mnode2\n  ceph-mon-ql9zp                             1/1       Running    0          2h        192.168.10.246   mnode1\n  ceph-osd-default-83945928-c7gdd            1/1       NodeLost   0          1h        192.168.10.248   mnode3\n  ceph-osd-default-83945928-kf5tj            1/1       Running    0          19m       192.168.10.249   mnode4\n  ceph-osd-default-83945928-s6gs6            1/1       Running    0          1h        192.168.10.246   mnode1\n  ceph-osd-default-83945928-vsc5b            1/1       Running    0          1h        192.168.10.247   mnode2\n  
ceph-rbd-provisioner-5bfb577ffd-j6hlx      1/1       Running    0          1h        192.168.4.16     mnode2\n  ceph-rbd-provisioner-5bfb577ffd-kdmrv      1/1       Running    0          25m       192.168.0.209    mnode1\n  ceph-rbd-provisioner-5bfb577ffd-zdx2d      1/1       Unknown    0          1h        192.168.3.16     mnode3\n  ceph-rgw-6c64b444d7-4qgkw                  1/1       Running    0          25m       192.168.0.210    mnode1\n  ceph-rgw-6c64b444d7-7bgqs                  1/1       Unknown    0          1h        192.168.3.12     mnode3\n  ceph-rgw-6c64b444d7-hv6vn                  1/1       Running    0          1h        192.168.4.13     mnode2\n  ingress-796d8cf8d6-4txkq                   1/1       Running    0          2h        192.168.2.6      mnode5\n  ingress-796d8cf8d6-9t7m8                   1/1       Running    0          2h        192.168.5.4      mnode4\n  ingress-error-pages-54454dc79b-hhb4f       1/1       Running    0          2h        192.168.2.5      mnode5\n  ingress-error-pages-54454dc79b-twpgc       1/1       Running    0          2h        192.168.4.4      mnode2\n\n``OpenStack PODs``\n\n.. 
code-block:: console\n\n  ubuntu@mnode1:~$ kubectl get pods -n openstack --show-all=false -o wide\n  Flag --show-all has been deprecated, will be removed in an upcoming release\n  NAME                                           READY     STATUS    RESTARTS   AGE       IP              NODE\n  cinder-api-66f4f9678-2lgwk                     1/1       Unknown   0          47m       192.168.3.41    mnode3\n  cinder-api-66f4f9678-flvr5                     1/1       Running   0          47m       192.168.0.202   mnode1\n  cinder-api-66f4f9678-w5xhd                     1/1       Running   0          26m       192.168.4.45    mnode2\n  cinder-backup-659b68b474-582kr                 1/1       Running   0          47m       192.168.4.39    mnode2\n  cinder-scheduler-6778f6f88c-mm9mt              1/1       Running   0          47m       192.168.0.201   mnode1\n  cinder-volume-79b9bd8bb9-qsxdk                 1/1       Running   0          47m       192.168.4.40    mnode2\n  glance-api-676fd49d4d-4tnm6                    1/1       Running   0          26m       192.168.0.212   mnode1\n  glance-api-676fd49d4d-j4bdb                    1/1       Unknown   0          51m       192.168.3.37    mnode3\n  glance-api-676fd49d4d-wtxqt                    1/1       Running   0          51m       192.168.4.31    mnode2\n  ingress-7b4bc84cdd-9fs78                       1/1       Running   0          2h        192.168.5.3     mnode4\n  ingress-7b4bc84cdd-wztz7                       1/1       Running   0          2h        192.168.1.4     mnode6\n  ingress-error-pages-586c7f86d6-2jl5q           1/1       Running   0          2h        192.168.2.4     mnode5\n  ingress-error-pages-586c7f86d6-455j5           1/1       Unknown   0          2h        192.168.3.3     mnode3\n  ingress-error-pages-586c7f86d6-55j4x           1/1       Running   0          26m       192.168.4.47    mnode2\n  keystone-api-5bcc7cb698-dzm8q                  1/1       Running   0          1h        192.168.4.24    
mnode2\n  keystone-api-5bcc7cb698-vvwwr                  1/1       Unknown   0          1h        192.168.3.25    mnode3\n  keystone-api-5bcc7cb698-wx5l6                  1/1       Running   0          26m       192.168.0.213   mnode1\n  mariadb-ingress-84894687fd-9lmpx               1/1       Running   0          26m       192.168.4.48    mnode2\n  mariadb-ingress-84894687fd-dfnkm               1/1       Unknown   2          1h        192.168.3.20    mnode3\n  mariadb-ingress-error-pages-78fb865f84-p8lpg   1/1       Running   0          1h        192.168.4.17    mnode2\n  mariadb-server-0                               1/1       Running   0          1h        192.168.4.18    mnode2\n  memcached-memcached-5db74ddfd5-926ln           1/1       Running   0          26m       192.168.4.49    mnode2\n  memcached-memcached-5db74ddfd5-wfr9q           1/1       Unknown   0          1h        192.168.3.23    mnode3\n  rabbitmq-rabbitmq-0                            1/1       Unknown   0          1h        192.168.3.21    mnode3\n  rabbitmq-rabbitmq-1                            1/1       Running   0          1h        192.168.4.19    mnode2\n  rabbitmq-rabbitmq-2                            1/1       Running   0          1h        192.168.0.195   mnode1\n"
  },
  {
    "path": "doc/source/testing/ceph-resiliency/README.rst",
    "content": "========================================\nResiliency Tests for OpenStack-Helm/Ceph\n========================================\n\nMission\n=======\n\nThe goal of our resiliency tests for `OpenStack-Helm/Ceph\n<https://github.com/openstack/openstack-helm/tree/master/ceph>`_ is to\nshow symptoms of software/hardware failure and provide the solutions.\n\nCaveats:\n   - Our focus lies on resiliency for various failure scenarios but\n     not on performance or stress testing.\n\nSoftware Failure\n================\n* `Monitor failure <./monitor-failure.html>`_\n* `OSD failure <./osd-failure.html>`_\n\nHardware Failure\n================\n* `Disk failure <./disk-failure.html>`_\n* `Host failure <./host-failure.html>`_\n\n"
  },
  {
    "path": "doc/source/testing/ceph-resiliency/disk-failure.rst",
    "content": "============\nDisk Failure\n============\n\nTest Environment\n================\n\n- Cluster size: 4 host machines\n- Number of disks: 24 (= 6 disks per host * 4 hosts)\n- Kubernetes version: 1.10.5\n- Ceph version: 12.2.3\n- OpenStack-Helm commit: 25e50a34c66d5db7604746f4d2e12acbdd6c1459\n\nCase: A disk fails\n==================\n\nSymptom:\n--------\n\nThis is to test a scenario when a disk failure happens.\nWe monitor the ceph status and notice one OSD (osd.2) on voyager4\nwhich has ``/dev/sdh`` as a backend is down.\n\n.. code-block:: console\n\n  (mon-pod):/# ceph -s\n    cluster:\n      id:     9d4d8c61-cf87-4129-9cef-8fbf301210ad\n      health: HEALTH_WARN\n              too few PGs per OSD (23 < min 30)\n              mon voyager1 is low on available space\n\n    services:\n      mon: 3 daemons, quorum voyager1,voyager2,voyager3\n      mgr: voyager1(active), standbys: voyager3\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-65bb45dffc-cslr6=up:active}, 1 up:standby\n      osd: 24 osds: 23 up, 23 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   18 pools, 182 pgs\n      objects: 240 objects, 3359 bytes\n      usage:   2548 MB used, 42814 GB / 42816 GB avail\n      pgs:     182 active+clean\n\n.. 
code-block:: console\n\n  (mon-pod):/# ceph osd tree\n  ID CLASS WEIGHT   TYPE NAME         STATUS REWEIGHT PRI-AFF\n  -1       43.67981 root default\n  -9       10.91995     host voyager1\n   5   hdd  1.81999         osd.5         up  1.00000 1.00000\n   6   hdd  1.81999         osd.6         up  1.00000 1.00000\n  10   hdd  1.81999         osd.10        up  1.00000 1.00000\n  17   hdd  1.81999         osd.17        up  1.00000 1.00000\n  19   hdd  1.81999         osd.19        up  1.00000 1.00000\n  21   hdd  1.81999         osd.21        up  1.00000 1.00000\n  -3       10.91995     host voyager2\n   1   hdd  1.81999         osd.1         up  1.00000 1.00000\n   4   hdd  1.81999         osd.4         up  1.00000 1.00000\n  11   hdd  1.81999         osd.11        up  1.00000 1.00000\n  13   hdd  1.81999         osd.13        up  1.00000 1.00000\n  16   hdd  1.81999         osd.16        up  1.00000 1.00000\n  18   hdd  1.81999         osd.18        up  1.00000 1.00000\n  -2       10.91995     host voyager3\n   0   hdd  1.81999         osd.0         up  1.00000 1.00000\n   3   hdd  1.81999         osd.3         up  1.00000 1.00000\n  12   hdd  1.81999         osd.12        up  1.00000 1.00000\n  20   hdd  1.81999         osd.20        up  1.00000 1.00000\n  22   hdd  1.81999         osd.22        up  1.00000 1.00000\n  23   hdd  1.81999         osd.23        up  1.00000 1.00000\n  -4       10.91995     host voyager4\n   2   hdd  1.81999         osd.2       down        0 1.00000\n   7   hdd  1.81999         osd.7         up  1.00000 1.00000\n   8   hdd  1.81999         osd.8         up  1.00000 1.00000\n   9   hdd  1.81999         osd.9         up  1.00000 1.00000\n  14   hdd  1.81999         osd.14        up  1.00000 1.00000\n  15   hdd  1.81999         osd.15        up  1.00000 1.00000\n\n\nSolution:\n---------\n\nTo replace the failed OSD, execute the following procedure:\n\n1. 
From the Kubernetes cluster, remove the failed OSD pod, which is running on ``voyager4``:\n\n.. code-block:: console\n\n  $ kubectl label nodes --all ceph_maintenance_window=inactive\n  $ kubectl label nodes voyager4 --overwrite ceph_maintenance_window=active\n  $ kubectl patch -n ceph ds ceph-osd-default-64779b8c -p='{\"spec\":{\"template\":{\"spec\":{\"nodeSelector\":{\"ceph-osd\":\"enabled\",\"ceph_maintenance_window\":\"inactive\"}}}}}'\n\nNote: To find the daemonset associated with a failed OSD, check out the following:\n\n.. code-block:: console\n\n  (voyager4)$ ps -ef|grep /usr/bin/ceph-osd\n  (voyager1)$ kubectl get ds -n ceph\n  (voyager1)$ kubectl get ds <daemonset-name> -n ceph -o yaml\n\n\n2. Remove the failed OSD (OSD ID = 2 in this example) from the Ceph cluster:\n\n.. code-block:: console\n\n  (mon-pod):/# ceph osd lost 2\n  (mon-pod):/# ceph osd crush remove osd.2\n  (mon-pod):/# ceph auth del osd.2\n  (mon-pod):/# ceph osd rm 2\n\n3. Find that Ceph is healthy with a lost OSD (i.e., a total of 23 OSDs):\n\n.. code-block:: console\n\n  (mon-pod):/# ceph -s\n    cluster:\n      id:     9d4d8c61-cf87-4129-9cef-8fbf301210ad\n      health: HEALTH_WARN\n              too few PGs per OSD (23 < min 30)\n              mon voyager1 is low on available space\n\n    services:\n      mon: 3 daemons, quorum voyager1,voyager2,voyager3\n      mgr: voyager1(active), standbys: voyager3\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-65bb45dffc-cslr6=up:active}, 1 up:standby\n      osd: 23 osds: 23 up, 23 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   18 pools, 182 pgs\n      objects: 240 objects, 3359 bytes\n      usage:   2551 MB used, 42814 GB / 42816 GB avail\n      pgs:     182 active+clean\n\n4. Replace the failed disk with a new one. If you repair (not replace) the failed disk,\nyou may need to run the following:\n\n.. code-block:: console\n\n  (voyager4)$ parted /dev/sdh mklabel msdos\n\n5. Start a new OSD pod on ``voyager4``:\n\n.. 
code-block:: console\n\n  $ kubectl label nodes voyager4 --overwrite ceph_maintenance_window=inactive\n\n6. Validate the Ceph status (i.e., one OSD is added, so the total number of OSDs becomes 24):\n\n.. code-block:: console\n\n  (mon-pod):/# ceph -s\n    cluster:\n      id:     9d4d8c61-cf87-4129-9cef-8fbf301210ad\n      health: HEALTH_WARN\n              too few PGs per OSD (22 < min 30)\n              mon voyager1 is low on available space\n\n    services:\n      mon: 3 daemons, quorum voyager1,voyager2,voyager3\n      mgr: voyager1(active), standbys: voyager3\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-65bb45dffc-cslr6=up:active}, 1 up:standby\n      osd: 24 osds: 24 up, 24 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   18 pools, 182 pgs\n      objects: 240 objects, 3359 bytes\n      usage:   2665 MB used, 44675 GB / 44678 GB avail\n      pgs:     182 active+clean\n"
  },
  {
    "path": "doc/source/testing/ceph-resiliency/failure-domain.rst",
    "content": ".. -*- coding: utf-8 -*-\n\n.. NOTE TO MAINTAINERS: use rst2html script to convert .rst to .html\n   rst2html ./failure-domain.rst ./failure-domain.html\n   open ./failure-domain.html\n\n==============================\n Failure Domains in CRUSH Map\n==============================\n\n.. contents::\n.. sectnum::\n\nOverview\n========\n\nThe `CRUSH Map <http://docs.ceph.com/docs/master/rados/operations/crush-map/?highlight=hammer%20profile>`__ in a Ceph cluster is best visualized\nas an inverted tree.  The hierarchical layout describes the physical\ntopology of the Ceph cluster.  Through the physical topology, failure\ndomains are conceptualized from the different branches in the inverted\ntree.  CRUSH rules are created and map to failure domains with data\nplacement policy to distribute the data.\n\nThe internal nodes (non-leaves and non-root) in the hierarchy are identified\nas buckets.  Each bucket is a hierarchical aggregation of storage locations\nand their assigned weights.  These are the types defined by CRUSH as the\nsupported buckets.\n\n::\n\n  # types\n  type 0 osd\n  type 1 host\n  type 2 chassis\n  type 3 rack\n  type 4 row\n  type 5 pdu\n  type 6 pod\n  type 7 room\n  type 8 datacenter\n  type 9 region\n  type 10 root\n\nThis guide describes the host and rack buckets and their role in constructing\na CRUSH Map with separate failure domains.  Once a Ceph cluster is configured\nwith the expected CRUSh Map and Rule, the PGs of the designated pool are\nverified with a script (**utils-checkPGs.py**) to ensure that the OSDs in all the PGs\nreside in separate failure domains.\n\nCeph Environment\n================\n\nThe ceph commands and scripts described in this write-up are executed as\nLinux user root on one of orchestration nodes and one of the ceph monitors\ndeployed as kubernetes pods. 
The root user has the credential to execute\nall the ceph commands.\n\nOn a kubernetes cluster, a separate namespace named **ceph** is configured\nfor the ceph cluster.  Include the **ceph** namespace in **kubectl** when\nexecuting this command.\n\nA kubernetes pod is a collection of docker containers sharing a network\nand mount namespace.  It is the basic unit of deployment in the kubernetes\ncluster.  The node in the kubernetes cluster where the orchestration\noperations are performed needs access to the **kubectl** command.  In this\nguide, this node is referred to as the orchestration node.  On this\nnode, you can list all the pods that are deployed.  To execute a command\nin a given pod, use **kubectl** to locate the name of the pod and switch\nto it to execute the command.\n\nOrchestration Node\n------------------\n\nTo gain access to the kubernetes orchestration node, use your login\ncredential and the authentication procedure assigned to you.  For\nenvironments setup with SSH key-based access, your id_rsa.pub (generated\nthrough the ssh-keygen) public key should be in your ~/.ssh/authorized_keys\nfile on the orchestration node.\n\nThe kubernetes and ceph commands require the root login credential to\nexecute.  Your Linux login requires the *sudo* privilege to execute\ncommands as user root.  On the orchestration node, acquire the root's\nprivilege with your Linux login through the *sudo* command.\n\n::\n\n  [orchestration]$ sudo -i\n  <Your Linux login's password>:\n  [orchestration]#\n\nKubernetes Pods\n---------------\n\nOn the orchestration node, execute the **kubectl** command to list the\nspecific set of pods with the **--selector** option.  
This **kubectl**\ncommand lists all the ceph monitor pods.\n\n::\n\n  [orchestration]# kubectl get pods -n ceph --selector component=mon\n  NAME             READY     STATUS    RESTARTS   AGE\n  ceph-mon-85mlt   2/2       Running   0          9d\n  ceph-mon-9mpnb   2/2       Running   0          9d\n  ceph-mon-rzzqr   2/2       Running   0          9d\n  ceph-mon-snds8   2/2       Running   0          9d\n  ceph-mon-snzwx   2/2       Running   0          9d\n\nThe following **kubectl** command lists the Ceph OSD pods.\n\n::\n\n  [orchestration]# kubectl get pods -n ceph --selector component=osd\n  NAME                              READY     STATUS    RESTARTS   AGE\n  ceph-osd-default-166a1044-95s74   2/2       Running   0          9d\n  ceph-osd-default-166a1044-bglnm   2/2       Running   0          9d\n  ceph-osd-default-166a1044-lq5qq   2/2       Running   0          9d\n  ceph-osd-default-166a1044-lz6x6   2/2       Running   0          9d\n  . . .\n\nTo list all the pods in all the namespaces, execute this **kubectl** command.\n\n::\n\n  [orchestration]# kubectl get pods --all-namespaces\n  NAMESPACE     NAME                                       READY     STATUS      RESTARTS   AGE\n  ceph          ceph-bootstrap-rpzld                       0/1       Completed   0          10d\n  ceph          ceph-cephfs-client-key-generator-pvzs6     0/1       Completed   0          10d\n\n\nExecute Commands in Pods\n^^^^^^^^^^^^^^^^^^^^^^^^\n\nTo execute multiple commands in a pod, you can switch to the execution\ncontext of the pod with a /bin/bash session.\n\n::\n\n  [orchestration]# kubectl exec -it ceph-mon-85mlt -n ceph -- /bin/bash\n  [ceph-mon]# ceph status\n    cluster:\n      id:     07c31d0f-bcc6-4db4-aadf-2d2a0f13edb8\n      health: HEALTH_OK\n\n    services:\n      mon: 5 daemons, quorum host1,host2,host3,host4,host5\n      mgr: host6(active), standbys: host1\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-7cb4f57cc-prh87=up:active}, 1 up:standby\n      osd: 72 
osds: 72 up, 72 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   20 pools, 3944 pgs\n      objects: 86970 objects, 323 GB\n      usage:   1350 GB used, 79077 GB / 80428 GB avail\n      pgs:     3944 active+clean\n\n    io:\n      client:   981 kB/s wr, 0 op/s rd, 84 op/s wr\n\nTo verify that you are executing within the context of a pod.  Display the\ncontent of the */proc/self/cgroup* control group file.  The *kubepods* output\nin the cgroup file shows that you're executing in a docker container of a pod.\n\n::\n\n  [ceph-mon]# cat /proc/self/cgroup\n  11:hugetlb:/kubepods/besteffort/podafb3689c-8c5b-11e8-be6a-246e96290f14/ff6cbc58348a44722ee6a493845b9c2903fabdce80d0902d217cc4d6962d7b53\n  . . .\n\nTo exit the pod and resume the orchestration node's execution context.\n\n::\n\n  [ceph-mon]# exit\n  [orchestration]#\n\nTo verify that you are executing on the orchestration node's context, display\nthe */proc/self/cgroup* control group file.  You would not see the *kubepods*\ndocker container in the output.\n\n::\n\n  [orchestration]# cat /proc/self/cgroup\n  11:blkio:/user.slice\n  10:freezer:/\n  9:hugetlb:/\n  . . .\n\nIt is also possible to run the ceph commands via the **kubectl exec**\nwithout switching to a pod's container.\n\n::\n\n  [orchestration]# kubectl exec ceph-mon-9mpnb -n ceph -- ceph status\n    cluster:\n      id:     07c31d0f-bcc6-4db4-aadf-2d2a0f13edb8\n      health: HEALTH_OK\n  . . .\n\n\nFailure Domains\n===============\n\nA failure domain provides the fault isolation for the data and it corresponds\nto a branch on the hierarchical topology.  To protect against data loss, OSDs\nthat are allocated to PGs should be chosen from different failure\ndomains.  Losing a branch takes down all the OSDs in that branch only and\nOSDs in the other branches are not affected.\n\nIn a data center, baremetal hosts are typically installed in a\nrack (refrigerator size cabinet).  
Multiple racks with hosts in each rack\nare used to provision the OSDs running on each host.  A rack is envisioned\nas a branch in the CRUSH topology.\n\nTo provide data redundancy, ceph maintains multiple copies of the data.  The\ntotal number of copies to store for each piece of data is determined by the\nceph **osd_pool_default_size** ceph.conf parameter.  With this parameter set\nto 3, each piece of the data has 3 copies that get stored in a pool.  Each\ncopy is stored on different OSDs allocated from different failure domains.\n\nHost\n----\n\nChoosing host as the failure domain lacks all the protections against\ndata loss.\n\nTo illustrate, a Ceph cluster has been provisioned with six hosts and four\nOSDs on each host.  The hosts are enclosed in respective racks where each\nrack contains two hosts.\n\nIn the configuration of the Ceph cluster, without explicit instructions on\nwhere the host and rack buckets should be placed, Ceph would create a\nCRUSH map without the rack bucket.  A CRUSH rule that gets created uses\nthe host as the failure domain.  
With the size (replica) of a pool set\nto 3, the OSDs in all the PGs are allocated from different hosts.\n\n::\n\n  root=default\n  ├── host1\n  │   ├── osd.1\n  │   ├── osd.2\n  │   ├── osd.3\n  │   └── osd.4\n  ├── host2\n  │   ├── osd.5\n  │   ├── osd.6\n  │   ├── osd.7\n  │   └── osd.8\n  ├── host3\n  │   ├── osd.9\n  │   ├── osd.10\n  │   ├── osd.11\n  │   └── osd.12\n  ├── host4\n  │   ├── osd.13\n  │   ├── osd.14\n  │   ├── osd.15\n  │   └── osd.16\n  ├── host5\n  │   ├── osd.17\n  │   ├── osd.18\n  │   ├── osd.19\n  │   └── osd.20\n  └── host6\n      ├── osd.21\n      ├── osd.22\n      ├── osd.23\n      └── osd.24\n\nOn this ceph cluster, it has a CRUSH rule that uses the host as the\nfailure domain.\n\n::\n\n  # ceph osd crush rule ls\n  replicated_host\n  # ceph osd crush rule dump replicated_host\n  {\n      \"rule_id\": 0,\n      \"rule_name\": \"replicated_host\",\n      \"ruleset\": 0,\n      \"type\": 1,\n      \"min_size\": 1,\n      \"max_size\": 10,\n      \"steps\": [\n          {\n              \"op\": \"take\",\n              \"item\": -1,\n              \"item_name\": \"default\"\n          },\n          {\n              \"op\": \"chooseleaf_firstn\",\n              \"num\": 0,\n              \"type\": \"host\" },\n          {\n              \"op\": \"emit\"\n          }\n      ]\n  }\n\nVerify the CRUSH rule that is assigned to the ceph pool.  In this\nexample, the rbd pool is used.\n\n::\n\n  # ceph osd pool get rbd crush_rule\n  crush_rule: replicated_host\n  # ceph osd pool get rbd size\n  size: 3\n  # ceph osd pool get rbd pg_num\n  pg_num: 1024\n\n\nTo verify that the OSDs in all the PGs are allocated from different\nhosts, invoke the **utils-checkPGs.py** utility on the ceph pool.  The offending\nPGs are printed to stdout.\n\n::\n\n  # /tmp/utils-checkPGs.py rbd\n  Checking PGs in pool rbd ... 
Passed\n\nWith host as the failure domain, quite possibly, some of the PGs might\nhave OSDs allocated from different hosts that are located in the same\nrack.  For example, one PG might have OSD numbers [1, 8, 13]. OSDs 1 and 8\nare found on hosts located in rack1.  When rack1 suffers a catastrophe\nfailure, PGs with OSDs allocated from the hosts in rack1 would be severely\ndegraded.\n\nRack\n----\n\nChoosing rack as the failure domain provides better protection against data\nloss.\n\nTo prevent PGs with OSDs allocated from hosts that are located in the same\nrack, configure the CRUSH hierarchy with the rack buckets.  In each rack\nbucket, it contains the hosts that reside in the same physical rack.  A\nCRUSH Rule is configured with rack as the failure domain.\n\nIn the following hierarchical topology, the Ceph cluster was configured with\nthree rack buckets.  Each bucket has two hosts.  In pools that were created\nwith the CRUSH rule set to rack, the OSDs in all the PGs are allocated from\nthe distinct rack.\n\n::\n\n  root=default\n  ├── rack1\n  │   ├── host1\n  │   │   ├── osd.1\n  │   │   ├── osd.2\n  │   │   ├── osd.3\n  │   │   └── osd.4\n  │   └── host2\n  │       ├── osd.5\n  │       ├── osd.6\n  │       ├── osd.7\n  │       └── osd.8\n  ├── rack2\n  │   ├── host3\n  │   │   ├── osd.9\n  │   │   ├── osd.10\n  │   │   ├── osd.11\n  │   │   └── osd.12\n  │   └── host4\n  │       ├── osd.13\n  │       ├── osd.14\n  │       ├── osd.15\n  │       └── osd.16\n  └── rack3\n      ├── host5\n      │   ├── osd.17\n      │   ├── osd.18\n      │   ├── osd.19\n      │   └── osd.20\n      └── host6\n          ├── osd.21\n          ├── osd.22\n          ├── osd.23\n          └── osd.24\n\nVerify the Ceph cluster has a CRUSH rule with rack as the failure domain.\n\n::\n\n  # ceph osd crush rule ls\n  rack_replicated_rule\n  # ceph osd crush rule dump rack_replicated_rule\n  {\n      \"rule_id\": 2,\n      \"rule_name\": \"rack_replicated_rule\",\n      \"ruleset\": 2,\n   
   \"type\": 1,\n      \"min_size\": 1,\n      \"max_size\": 10,\n      \"steps\": [\n          {\n              \"op\": \"take\",\n              \"item\": -1,\n              \"item_name\": \"default\"\n          },\n          {\n              \"op\": \"chooseleaf_firstn\",\n              \"num\": 0,\n              \"type\": \"rack\"\n          },\n          {\n              \"op\": \"emit\"\n          }\n      ]\n  }\n\nCreate a ceph pool with its CRUSH rule set to the rack's rule.\n\n::\n\n  # ceph osd pool create rbd 2048 2048 replicated rack_replicated_rule\n  pool 'rbd' created\n  # ceph osd pool get rbd crush_rule\n  crush_rule: rack_replicated_rule\n  # ceph osd pool get rbd size\n  size: 3\n  # ceph osd pool get rbd pg_num\n  pg_num: 2048\n\nInvoke the **utils-checkPGs.py** script on the pool to verify that there are no PGs\nwith OSDs allocated from the same rack.  The offending PGs are printed to\nstdout.\n\n::\n\n  # /tmp/utils-checkPGs.py rbd\n  Checking PGs in pool rbd ... Passed\n\n\nCRUSH Map and Rule\n==================\n\nOn a properly configured Ceph cluster, there are different ways to view\nthe CRUSH hierarchy.\n\nceph CLI\n--------\n\nPrint to stdout the CRUSH hierarchy with the ceph CLI.\n\n::\n\n  root@host5:/# ceph osd crush tree\n  ID  CLASS WEIGHT   TYPE NAME\n   -1       78.47974 root default\n  -15       26.15991     rack rack1\n   -2       13.07996         host host1\n    0   hdd  1.09000             osd.0\n    1   hdd  1.09000             osd.1\n    2   hdd  1.09000             osd.2\n    3   hdd  1.09000             osd.3\n    4   hdd  1.09000             osd.4\n    5   hdd  1.09000             osd.5\n    6   hdd  1.09000             osd.6\n    7   hdd  1.09000             osd.7\n    8   hdd  1.09000             osd.8\n    9   hdd  1.09000             osd.9\n   10   hdd  1.09000             osd.10\n   11   hdd  1.09000             osd.11\n   -5       13.07996         host host2\n   12   hdd  1.09000             osd.12\n   13   hdd  
1.09000             osd.13\n   14   hdd  1.09000             osd.14\n   15   hdd  1.09000             osd.15\n   16   hdd  1.09000             osd.16\n   17   hdd  1.09000             osd.17\n   18   hdd  1.09000             osd.18\n   19   hdd  1.09000             osd.19\n   20   hdd  1.09000             osd.20\n   21   hdd  1.09000             osd.21\n   22   hdd  1.09000             osd.22\n   23   hdd  1.09000             osd.23\n  -16       26.15991     rack rack2\n  -13       13.07996         host host3\n   53   hdd  1.09000             osd.53\n   54   hdd  1.09000             osd.54\n   58   hdd  1.09000             osd.58\n   59   hdd  1.09000             osd.59\n   64   hdd  1.09000             osd.64\n   65   hdd  1.09000             osd.65\n   66   hdd  1.09000             osd.66\n   67   hdd  1.09000             osd.67\n   68   hdd  1.09000             osd.68\n   69   hdd  1.09000             osd.69\n   70   hdd  1.09000             osd.70\n   71   hdd  1.09000             osd.71\n   -9       13.07996         host host4\n   36   hdd  1.09000             osd.36\n   37   hdd  1.09000             osd.37\n   38   hdd  1.09000             osd.38\n   39   hdd  1.09000             osd.39\n   40   hdd  1.09000             osd.40\n   41   hdd  1.09000             osd.41\n   42   hdd  1.09000             osd.42\n   43   hdd  1.09000             osd.43\n   44   hdd  1.09000             osd.44\n   45   hdd  1.09000             osd.45\n   46   hdd  1.09000             osd.46\n   47   hdd  1.09000             osd.47\n  -17       26.15991     rack rack3\n  -11       13.07996         host host5\n   48   hdd  1.09000             osd.48\n   49   hdd  1.09000             osd.49\n   50   hdd  1.09000             osd.50\n   51   hdd  1.09000             osd.51\n   52   hdd  1.09000             osd.52\n   55   hdd  1.09000             osd.55\n   56   hdd  1.09000             osd.56\n   57   hdd  1.09000             osd.57\n   60   hdd  1.09000             osd.60\n   61   hdd 
 1.09000             osd.61\n   62   hdd  1.09000             osd.62\n   63   hdd  1.09000             osd.63\n   -7       13.07996         host host6\n   24   hdd  1.09000             osd.24\n   25   hdd  1.09000             osd.25\n   26   hdd  1.09000             osd.26\n   27   hdd  1.09000             osd.27\n   28   hdd  1.09000             osd.28\n   29   hdd  1.09000             osd.29\n   30   hdd  1.09000             osd.30\n   31   hdd  1.09000             osd.31\n   32   hdd  1.09000             osd.32\n   33   hdd  1.09000             osd.33\n   34   hdd  1.09000             osd.34\n   35   hdd  1.09000             osd.35\n  root@host5:/#\n\nTo see weight and affinity of each OSD.\n\n::\n\n  root@host5:/# ceph osd tree\n  ID  CLASS WEIGHT   TYPE NAME                 STATUS REWEIGHT PRI-AFF\n   -1       78.47974 root default\n  -15       26.15991     rack rack1\n   -2       13.07996         host host1\n    0   hdd  1.09000             osd.0             up  1.00000 1.00000\n    1   hdd  1.09000             osd.1             up  1.00000 1.00000\n    2   hdd  1.09000             osd.2             up  1.00000 1.00000\n    3   hdd  1.09000             osd.3             up  1.00000 1.00000\n    4   hdd  1.09000             osd.4             up  1.00000 1.00000\n    5   hdd  1.09000             osd.5             up  1.00000 1.00000\n    6   hdd  1.09000             osd.6             up  1.00000 1.00000\n    7   hdd  1.09000             osd.7             up  1.00000 1.00000\n    8   hdd  1.09000             osd.8             up  1.00000 1.00000\n    9   hdd  1.09000             osd.9             up  1.00000 1.00000\n   10   hdd  1.09000             osd.10            up  1.00000 1.00000\n   11   hdd  1.09000             osd.11            up  1.00000 1.00000\n   -5       13.07996         host host2\n   12   hdd  1.09000             osd.12            up  1.00000 1.00000\n   13   hdd  1.09000             osd.13            up  1.00000 1.00000\n   14   hdd  1.09000   
          osd.14            up  1.00000 1.00000\n   15   hdd  1.09000             osd.15            up  1.00000 1.00000\n   16   hdd  1.09000             osd.16            up  1.00000 1.00000\n   17   hdd  1.09000             osd.17            up  1.00000 1.00000\n   18   hdd  1.09000             osd.18            up  1.00000 1.00000\n   19   hdd  1.09000             osd.19            up  1.00000 1.00000\n   20   hdd  1.09000             osd.20            up  1.00000 1.00000\n   21   hdd  1.09000             osd.21            up  1.00000 1.00000\n   22   hdd  1.09000             osd.22            up  1.00000 1.00000\n   23   hdd  1.09000             osd.23            up  1.00000 1.00000\n\n\n\ncrushtool CLI\n-------------\n\nTo extract the CRUSH Map from a running cluster and convert it into ascii text.\n\n::\n\n  # ceph osd getcrushmap -o /tmp/cm.bin\n  100\n  # crushtool -d /tmp/cm.bin -o /tmp/cm.rack.ascii\n  # cat /tmp/cm.rack.ascii\n  . . .\n  # buckets\n  host host1 {\n        id -2           # do not change unnecessarily\n        id -3 class hdd         # do not change unnecessarily\n        # weight 13.080\n        alg straw2\n        hash 0  # rjenkins1\n        item osd.0 weight 1.090\n        item osd.1 weight 1.090\n        item osd.2 weight 1.090\n        item osd.3 weight 1.090\n        item osd.4 weight 1.090\n        item osd.5 weight 1.090\n        item osd.6 weight 1.090\n        item osd.7 weight 1.090\n        item osd.8 weight 1.090\n        item osd.9 weight 1.090\n        item osd.10 weight 1.090\n        item osd.11 weight 1.090\n  }\n  host host2 {\n        id -5           # do not change unnecessarily\n        id -6 class hdd         # do not change unnecessarily\n        # weight 13.080\n        alg straw2\n        hash 0  # rjenkins1\n        item osd.12 weight 1.090\n        item osd.13 weight 1.090\n        item osd.14 weight 1.090\n        item osd.15 weight 1.090\n        item osd.16 weight 1.090\n        item osd.18 weight 1.090\n   
     item osd.19 weight 1.090\n        item osd.17 weight 1.090\n        item osd.20 weight 1.090\n        item osd.21 weight 1.090\n        item osd.22 weight 1.090\n        item osd.23 weight 1.090\n  }\n  rack rack1 {\n        id -15          # do not change unnecessarily\n        id -20 class hdd        # do not change unnecessarily\n        # weight 26.160\n        alg straw2\n        hash 0  # rjenkins1\n        item host1 weight 13.080\n        item host2 weight 13.080\n  }\n  . . .\n  root default {\n        id -1          # do not change unnecessarily\n        id -4 class hdd        # do not change unnecessarily\n        # weight 78.480\n        alg straw2\n        hash 0  # rjenkins1\n        item rack1 weight 26.160\n        item rack2 weight 26.160\n        item rack3 weight 26.160\n  }\n\n  # rules\n  rule replicated_rack {\n        id 2\n        type replicated\n        min_size 1\n        max_size 10\n        step take default\n        step chooseleaf firstn 0 type rack\n        step emit\n  }\n  # end crush map\n\nThe **utils-checkPGs.py** script can read the same data from memory and construct\nthe failure domains with OSDs.  Verify the OSDs in each PG against the\nconstructed failure domains.\n\nConfigure the Failure Domain in CRUSH Map\n=========================================\n\nThe Ceph ceph-osd, ceph-client and cinder charts accept configuration parameters to set the Failure Domain for CRUSH.\nThe options available are **failure_domain**, **failure_domain_by_hostname**, **failure_domain_name** and **crush_rule**\n\n::\n\n ceph-osd specific overrides\n failure_domain: Set the CRUSH bucket type for your OSD to reside in. (DEFAULT: \"host\")\n failure_domain_by_hostname: Specify the portion of the hostname to use for your failure domain bucket name. (DEFAULT: \"false\")\n failure_domain_name: Manually name the failure domain bucket name. This configuration option should only be used when using host based overrides. 
(DEFAULT: \"false\")\n\n::\n\n ceph-client and cinder specific overrides\n crush_rule: Set the crush rule for a pool (DEFAULT: \"replicated_rule\")\n\nAn example of a lab environment had the following parameters set for the ceph yaml override file to apply a rack level failure domain within CRUSH.\n\n::\n\n  endpoints:\n    identity:\n      namespace: openstack\n    object_store:\n      namespace: ceph\n    ceph_mon:\n      namespace: ceph\n  network:\n    public: 10.0.0.0/24\n    cluster: 10.0.0.0/24\n  deployment:\n    storage_secrets: true\n    ceph: true\n    csi_rbd_provisioner: true\n    rbd_provisioner: false\n    cephfs_provisioner: false\n    client_secrets: false\n    rgw_keystone_user_and_endpoints: false\n  bootstrap:\n    enabled: true\n  conf:\n    ceph:\n      global:\n        fsid: 6c12a986-148d-45a7-9120-0cf0522ca5e0\n    rgw_ks:\n      enabled: true\n    pool:\n      default:\n        crush_rule: rack_replicated_rule\n      crush:\n        tunables: null\n      target:\n        # NOTE(portdirect): 5 nodes, with one osd per node\n        osd: 18\n        pg_per_osd: 100\n    storage:\n      osd:\n        - data:\n            type: block-logical\n            location: /dev/vdb\n          journal:\n            type: block-logical\n            location: /dev/vde1\n        - data:\n            type: block-logical\n            location: /dev/vdc\n          journal:\n            type: block-logical\n            location: /dev/vde2\n        - data:\n            type: block-logical\n            location: /dev/vdd\n          journal:\n            type: block-logical\n            location: /dev/vde3\n    overrides:\n      ceph_osd:\n        hosts:\n          - name: osh-1\n            conf:\n              storage:\n                failure_domain: \"rack\"\n                failure_domain_name: \"rack1\"\n          - name: osh-2\n            conf:\n              storage:\n                failure_domain: \"rack\"\n                failure_domain_name: 
\"rack1\"\n          - name: osh-3\n            conf:\n              storage:\n                failure_domain: \"rack\"\n                failure_domain_name: \"rack2\"\n          - name: osh-4\n            conf:\n              storage:\n                failure_domain: \"rack\"\n                failure_domain_name: \"rack2\"\n          - name: osh-5\n            conf:\n              storage:\n                failure_domain: \"rack\"\n                failure_domain_name: \"rack3\"\n          - name: osh-6\n            conf:\n              storage:\n                failure_domain: \"rack\"\n                failure_domain_name: \"rack3\"\n\n.. NOTE::\n\n   Note that the cinder chart will need an override configured to ensure the cinder pools in Ceph are using the correct **crush_rule**.\n\n::\n\n  pod:\n    replicas:\n      api: 2\n      volume: 1\n      scheduler: 1\n      backup: 1\n  conf:\n    cinder:\n      DEFAULT:\n        backup_driver: cinder.backup.drivers.swift\n    ceph:\n      pools:\n        backup:\n          replicated: 3\n          crush_rule: rack_replicated_rule\n          chunk_size: 8\n        volume:\n          replicated: 3\n          crush_rule: rack_replicated_rule\n          chunk_size: 8\n\nThe charts can be updated with these overrides pre or post deployment. If this is a post deployment change then the following steps will apply for a gate based openstack-helm deployment.\n\n::\n\n  cd /opt/openstack-helm\n  helm upgrade --install ceph-osd ../openstack-helm/ceph-osd --namespace=ceph --values=/tmp/ceph.yaml\n  kubectl delete jobs/ceph-rbd-pool -n ceph\n  helm upgrade --install ceph-client ../openstack-helm/ceph-client --namespace=ceph --values=/tmp/ceph.yaml\n  helm delete cinder --purge\n  helm upgrade --install cinder ./cinder --namespace=openstack --values=/tmp/cinder.yaml\n\n.. NOTE::\n\n  There will be a brief interruption of I/O and a data movement of placement groups in Ceph while these changes are\n  applied. 
The data movement operation can take several minutes to several days to complete.\n\nThe utils-checkPGs.py Script\n============================\n\nThe purpose of the **utils-checkPGs.py** script is to check whether a PG has OSDs\nallocated from the same failure domain.  The violating PGs with their\nrespective OSDs are printed to the stdout.\n\nIn this example, a pool was created with the CRUSH rule set to the host\nfailure domain.  The ceph cluster was configured with the rack\nbuckets.  The CRUSH algorithm allocated the OSDs from different hosts\nin each PG.  The rack buckets were ignored and thus the duplicate\nracks which get reported by the script.\n\n::\n\n  root@host5:/# /tmp/utils-checkPGs.py cmTestPool\n  Checking PGs in pool cmTestPool ... Failed\n  OSDs [44, 32, 53] in PG 20.a failed check in rack [u'rack2', u'rack2', u'rack2']\n  OSDs [61, 5, 12] in PG 20.19 failed check in rack [u'rack1', u'rack1', u'rack1']\n  OSDs [69, 9, 15] in PG 20.2a failed check in rack [u'rack1', u'rack1', u'rack1']\n  . . .\n\n\n.. NOTE::\n\n  The **utils-checkPGs.py** utility is executed on-demand.  It is intended to be executed on one of the ceph-mon pods.\n\nIf the **utils-checkPGs.py** script did not find any violation, it prints\nPassed.  In this example, the ceph cluster was configured with the rack\nbuckets.  The rbd pool was created with its CRUSH rule set to the\nrack.  The **utils-checkPGs.py** script did not find duplicate racks in PGs.\n\n::\n\n  root@host5:/# /tmp/utils-checkPGs.py rbd\n  Checking PGs in pool rbd ... Passed\n\nInvoke the **utils-checkPGs.py** script with the --help option to get the\nscript's usage.\n\n::\n\n  root@host5:/# /tmp/utils-checkPGs.py --help\n  usage: utils-checkPGs.py [-h] PoolName [PoolName ...]\n\n  Cross-check the OSDs assigned to the Placement Groups (PGs) of a ceph pool\n  with the CRUSH topology.  The cross-check compares the OSDs in a PG and\n  verifies the OSDs reside in separate failure domains.  
PGs with OSDs in\n  the same failure domain are flagged as violation.  The offending PGs are\n  printed to stdout.\n\n  This CLI is executed on-demand on a ceph-mon pod.  To invoke the CLI, you\n  can specify one pool or list of pools to check.  The special pool name\n  All (or all) checks all the pools in the ceph cluster.\n\n  positional arguments:\n    PoolName    List of pools (or All) to validate the PGs and OSDs mapping\n\n  optional arguments:\n    -h, --help  show this help message and exit\n  root@host5:/#\n\n\nThe source for the **utils-checkPGs.py** script is available\nat **openstack-helm/ceph-mon/templates/bin/utils/_checkPGs.py.tpl**.\n\nCeph Deployments\n================\n\nThrough testing and verification, you arrive at a CRUSH Map with the buckets\nthat are deemed beneficial to your ceph cluster.  Standardize on the verified\nCRUSH map to have the consistency in all the Ceph deployments across the\ndata centers.\n\nMimicking the hierarchy in your CRUSH Map with the physical hardware setup\nshould provide the needed information on the topology layout.  With the\nracks layout, each rack can store a replica of your data.\n\nTo validate a ceph cluster with the number of replicas that is based on\nthe number of racks:\n\n#. The number of physical racks and the number of replicas are 3, respectively.  Create a ceph pool with replica set to 3 and pg_num set to (# of OSDs * 50) / 3 and round the number to the next power-of-2.  For example, if the calculation is 240, round it to 256.  Assuming the pool you just created had 256 PGs.  In each PG, verify the OSDs are chosen from the three racks, respectively.  Use the **utils-checkPGs.py** script to verify the OSDs in all the PGs of the pool.\n\n#. The number of physical racks is 2 and the number of replicas is 3.  Create a ceph pool as described in the previous step.  In the pool you created, in each PG, verify two of the OSDs are chosen from the two racks, respectively.  
The third OSD can come from one of the two racks but not from the same hosts as the other two OSDs.\n\nData Movement\n=============\n\nChanges to the CRUSH Map always trigger data movement.  It is prudent that\nyou plan accordingly when restructuring the CRUSH Map.  Once started, the\nCRUSH Map restructuring runs to completion and can neither be stopped nor\nsuspended.  On a busy Ceph cluster with live transactions, it is always\nsafer to use divide-and-conquer approach to complete small chunk of works\nin multiple sessions.\n\nWatch the progress of the data movement while the Ceph cluster re-balances\nitself.\n\n::\n\n  # watch ceph status\n    cluster:\n      id:     07c31d0f-bcc6-4db4-aadf-2d2a0f13edb8\n      health: HEALTH_WARN\n              137084/325509 objects misplaced (42.114%)\n              Degraded data redundancy: 28/325509 objects degraded (0.009%), 15 pgs degraded\n\n    services:\n      mon: 5 daemons, quorum host1,host2,host3,host4,host5\n      mgr: host6(active), standbys: host1\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-7cb4f57cc-prh87=up:active}, 1 up:standby\n      osd: 72 osds: 72 up, 72 in; 815 remapped pgs\n      rgw: 2 daemons active\n\n    data:\n      pools:   19 pools, 2920 pgs\n      objects: 105k objects, 408 GB\n      usage:   1609 GB used, 78819 GB / 80428 GB avail\n      pgs:     28/325509 objects degraded (0.009%)\n               137084/325509 objects misplaced (42.114%)\n               2085 active+clean\n               790  active+remapped+backfill_wait\n               22   active+remapped+backfilling\n               15   active+recovery_wait+degraded\n               4    active+recovery_wait+remapped\n               4    active+recovery_wait\n\n    io:\n      client:   11934 B/s rd, 3731 MB/s wr, 2 op/s rd, 228 kop/s wr\n      recovery: 636 MB/s, 163 objects/s\n\nAt the time this **ceph status** command was executed, the status's output\nshowed that the ceph cluster was going through re-balancing.  
Among the\noverall 2920 pgs, 2085 of them are in **active+clean** state.  The\nremaining pgs are either being remapped or recovered.  As the ceph\ncluster continues its re-balance, the number of pgs\nin **active+clean** increases.\n\n::\n\n  # ceph status\n    cluster:\n      id:     07c31d0f-bcc6-4db4-aadf-2d2a0f13edb8\n      health: HEALTH_OK\n\n    services:\n      mon: 5 daemons, quorum host1,host2,host3,host4,host5\n      mgr: host6(active), standbys: host1\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-7cc55c9695-lj22d=up:active}, 1 up:standby\n      osd: 72 osds: 72 up, 72 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   19 pools, 2920 pgs\n      objects: 134k objects, 519 GB\n      usage:   1933 GB used, 78494 GB / 80428 GB avail\n      pgs:     2920 active+clean\n\n    io:\n      client:   1179 B/s rd, 971 kB/s wr, 1 op/s rd, 41 op/s wr\n\nWhen the overall number of pgs is equal to the number\nof **active+clean** pgs, the health of the ceph cluster changes\nto **HEALTH_OK** (assuming there are no other warning conditions).\n"
  },
  {
    "path": "doc/source/testing/ceph-resiliency/host-failure.rst",
    "content": "============\nHost Failure\n============\n\nTest Environment\n================\n\n- Cluster size: 4 host machines\n- Number of disks: 24 (= 6 disks per host * 4 hosts)\n- Kubernetes version: 1.10.5\n- Ceph version: 12.2.3\n- OpenStack-Helm commit: 25e50a34c66d5db7604746f4d2e12acbdd6c1459\n\nCase: One host machine where ceph-mon is running is rebooted\n============================================================\n\nSymptom:\n--------\n\nAfter reboot (node voyager3), the node status changes to ``NotReady``.\n\n.. code-block:: console\n\n  $ kubectl get nodes\n  NAME       STATUS     ROLES     AGE       VERSION\n  voyager1   Ready      master    6d        v1.10.5\n  voyager2   Ready      <none>    6d        v1.10.5\n  voyager3   NotReady   <none>    6d        v1.10.5\n  voyager4   Ready      <none>    6d        v1.10.5\n\nCeph status shows that ceph-mon running on ``voyager3`` becomes out of quorum.\nAlso, six osds running on ``voyager3`` are down; i.e., 18 osds are up out of 24 osds.\n\n.. 
code-block:: console\n\n  (mon-pod):/# ceph -s\n    cluster:\n      id:     9d4d8c61-cf87-4129-9cef-8fbf301210ad\n      health: HEALTH_WARN\n              6 osds down\n              1 host (6 osds) down\n              Degraded data redundancy: 195/624 objects degraded (31.250%), 8 pgs degraded\n              too few PGs per OSD (17 < min 30)\n              mon voyager1 is low on available space\n              1/3 mons down, quorum voyager1,voyager2\n\n    services:\n      mon: 3 daemons, quorum voyager1,voyager2, out of quorum: voyager3\n      mgr: voyager1(active), standbys: voyager3\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-65bb45dffc-cslr6=up:active}, 1 up:standby\n      osd: 24 osds: 18 up, 24 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   18 pools, 182 pgs\n      objects: 208 objects, 3359 bytes\n      usage:   2630 MB used, 44675 GB / 44678 GB avail\n      pgs:     195/624 objects degraded (31.250%)\n               126 active+undersized\n               48  active+clean\n               8   active+undersized+degraded\n\nRecovery:\n---------\nThe node status of ``voyager3`` changes to ``Ready`` after the node is up again.\nAlso, Ceph pods are restarted automatically.\nCeph status shows that the monitor running on ``voyager3`` is now in quorum.\n\n.. code-block:: console\n\n  $ kubectl get nodes\n  NAME       STATUS    ROLES     AGE       VERSION\n  voyager1   Ready     master    6d        v1.10.5\n  voyager2   Ready     <none>    6d        v1.10.5\n  voyager3   Ready     <none>    6d        v1.10.5\n  voyager4   Ready     <none>    6d        v1.10.5\n\n.. 
code-block:: console\n\n  (mon-pod):/# ceph -s\n    cluster:\n      id:     9d4d8c61-cf87-4129-9cef-8fbf301210ad\n      health: HEALTH_WARN\n              too few PGs per OSD (22 < min 30)\n              mon voyager1 is low on available space\n\n    services:\n      mon: 3 daemons, quorum voyager1,voyager2,voyager3\n      mgr: voyager1(active), standbys: voyager3\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-65bb45dffc-cslr6=up:active}, 1 up:standby\n      osd: 24 osds: 24 up, 24 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   18 pools, 182 pgs\n      objects: 208 objects, 3359 bytes\n      usage:   2635 MB used, 44675 GB / 44678 GB avail\n      pgs:     182 active+clean\n\nCase: A host machine where ceph-mon is running is down\n======================================================\n\nThis is for the case when a host machine (where ceph-mon is running) is down.\n\nSymptom:\n--------\n\nAfter the host is down (node voyager3), the node status changes to ``NotReady``.\n\n.. code-block:: console\n\n  $ kubectl get nodes\n  NAME       STATUS     ROLES     AGE       VERSION\n  voyager1   Ready      master    14d       v1.10.5\n  voyager2   Ready      <none>    14d       v1.10.5\n  voyager3   NotReady   <none>    14d       v1.10.5\n  voyager4   Ready      <none>    14d       v1.10.5\n\nCeph status shows that ceph-mon running on ``voyager3`` becomes out of quorum.\nAlso, 6 osds running on ``voyager3`` are down (i.e., 18 out of 24 osds are up).\nSome placement groups become degraded and undersized.\n\n.. 
code-block:: console\n\n  (mon-pod):/# ceph -s\n    cluster:\n        id:     9d4d8c61-cf87-4129-9cef-8fbf301210ad\n        health: HEALTH_WARN\n                6 osds down\n                1 host (6 osds) down\n                Degraded data redundancy: 227/720 objects degraded (31.528%), 8 pgs\n    degraded\n                too few PGs per OSD (17 < min 30)\n                mon voyager1 is low on available space\n                1/3 mons down, quorum voyager1,voyager2\n\n      services:\n        mon: 3 daemons, quorum voyager1,voyager2, out of quorum: voyager3\n        mgr: voyager1(active), standbys: voyager3\n        mds: cephfs-1/1/1 up  {0=mds-ceph-mds-65bb45dffc-cslr6=up:active}, 1 up:stan\n    dby\n        osd: 24 osds: 18 up, 24 in\n        rgw: 2 daemons active\n\n      data:\n        pools:   18 pools, 182 pgs\n        objects: 240 objects, 3359 bytes\n        usage:   2695 MB used, 44675 GB / 44678 GB avail\n        pgs:     227/720 objects degraded (31.528%)\n                 126 active+undersized\n                 48  active+clean\n                 8   active+undersized+degraded\n\nThe pod status of ceph-mon and ceph-osd shows as ``NodeLost``.\n\n.. 
code-block:: console\n\n  $ kubectl get pods -n ceph -o wide|grep voyager3\n  ceph-mgr-55f68d44b8-hncrq                  1/1       Unknown     6          8d        135.207.240.43   voyager3\n  ceph-mon-6bbs6                             1/1       NodeLost    8          8d        135.207.240.43   voyager3\n  ceph-osd-default-64779b8c-lbkcd            1/1       NodeLost    1          6d        135.207.240.43   voyager3\n  ceph-osd-default-6ea9de2c-gp7zm            1/1       NodeLost    2          8d        135.207.240.43   voyager3\n  ceph-osd-default-7544b6da-7mfdc            1/1       NodeLost    2          8d        135.207.240.43   voyager3\n  ceph-osd-default-7cfc44c1-hhk8v            1/1       NodeLost    2          8d        135.207.240.43   voyager3\n  ceph-osd-default-83945928-b95qs            1/1       NodeLost    2          8d        135.207.240.43   voyager3\n  ceph-osd-default-f9249fa9-n7p4v            1/1       NodeLost    3          8d        135.207.240.43   voyager3\n\nAfter 10+ minutes, Ceph starts rebalancing with one node lost (i.e., 6 osds down)\nand the status stabilizes with 18 osds.\n\n.. 
code-block:: console\n\n  (mon-pod):/# ceph -s\n    cluster:\n      id:     9d4d8c61-cf87-4129-9cef-8fbf301210ad\n      health: HEALTH_WARN\n              mon voyager1 is low on available space\n              1/3 mons down, quorum voyager1,voyager2\n\n    services:\n      mon: 3 daemons, quorum voyager1,voyager2, out of quorum: voyager3\n      mgr: voyager1(active), standbys: voyager2\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-65bb45dffc-cslr6=up:active}, 1 up:standby\n      osd: 24 osds: 18 up, 18 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   18 pools, 182 pgs\n      objects: 240 objects, 3359 bytes\n      usage:   2025 MB used, 33506 GB / 33508 GB avail\n      pgs:     182 active+clean\n\n\nRecovery:\n---------\n\nThe node status of ``voyager3`` changes to ``Ready`` after the node is up again.\nAlso, Ceph pods are restarted automatically.\nThe Ceph status shows that the monitor running on ``voyager3`` is now in quorum\nand 6 osds gets back up (i.e., a total of 24 osds are up).\n\n.. code-block:: console\n\n  (mon-pod):/# ceph -s\n    cluster:\n      id:     9d4d8c61-cf87-4129-9cef-8fbf301210ad\n      health: HEALTH_WARN\n              too few PGs per OSD (22 < min 30)\n              mon voyager1 is low on available space\n\n    services:\n      mon: 3 daemons, quorum voyager1,voyager2,voyager3\n      mgr: voyager1(active), standbys: voyager2\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-65bb45dffc-cslr6=up:active}, 1 up:standby\n      osd: 24 osds: 24 up, 24 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   18 pools, 182 pgs\n      objects: 240 objects, 3359 bytes\n      usage:   2699 MB used, 44675 GB / 44678 GB avail\n      pgs:     182 active+clean\n\nAlso, the pod status of ceph-mon and ceph-osd changes from ``NodeLost`` back to ``Running``.\n\n.. 
code-block:: console\n\n  $ kubectl get pods -n ceph -o wide|grep voyager3\n  ceph-mon-6bbs6                             1/1       Running     9          8d        135.207.240.43   voyager3\n  ceph-osd-default-64779b8c-lbkcd            1/1       Running     2          7d        135.207.240.43   voyager3\n  ceph-osd-default-6ea9de2c-gp7zm            1/1       Running     3          8d        135.207.240.43   voyager3\n  ceph-osd-default-7544b6da-7mfdc            1/1       Running     3          8d        135.207.240.43   voyager3\n  ceph-osd-default-7cfc44c1-hhk8v            1/1       Running     3          8d        135.207.240.43   voyager3\n  ceph-osd-default-83945928-b95qs            1/1       Running     3          8d        135.207.240.43   voyager3\n  ceph-osd-default-f9249fa9-n7p4v            1/1       Running     4          8d        135.207.240.43   voyager3\n"
  },
  {
    "path": "doc/source/testing/ceph-resiliency/index.rst",
    "content": "===============\nCeph Resiliency\n===============\n\n.. toctree::\n   :maxdepth: 2\n\n   README\n   monitor-failure\n   osd-failure\n   disk-failure\n   host-failure\n   failure-domain\n   validate-object-replication\n   namespace-deletion\n"
  },
  {
    "path": "doc/source/testing/ceph-resiliency/monitor-failure.rst",
    "content": "===============\nMonitor Failure\n===============\n\nTest Environment\n================\n\n- Cluster size: 4 host machines\n- Number of disks: 24 (= 6 disks per host * 4 hosts)\n- Kubernetes version: 1.9.3\n- Ceph version: 12.2.3\n- OpenStack-Helm commit: 28734352741bae228a4ea4f40bcacc33764221eb\n\nWe have 3 Monitors in this Ceph cluster, one on each of the 3 Monitor\nhosts.\n\nCase: 1 out of 3 Monitor Processes is Down\n==========================================\n\nThis is to test a scenario when 1 out of 3 Monitor processes is down.\n\nTo bring down 1 Monitor process (out of 3), we identify a Monitor\nprocess and kill it from the monitor host (not a pod).\n\n.. code-block:: console\n\n  $ ps -ef | grep ceph-mon\n  ceph     16112 16095  1 14:58 ?        00:00:03 /usr/bin/ceph-mon --cluster ceph --setuser ceph --setgroup ceph -d -i voyager2 --mon-data /var/lib/ceph/mon/ceph-voyager2 --public-addr 135.207.240.42:6789\n  $ sudo kill -9 16112\n\nIn the mean time, we monitored the status of Ceph and noted that it\ntakes about 24 seconds for the killed Monitor process to recover from\n``down`` to ``up``. The reason is that Kubernetes automatically\nrestarts pods whenever they are killed.\n\n.. code-block:: console\n\n  (mon-pod):/# ceph -s\n    cluster:\n      id:     fd366aef-b356-4fe7-9ca5-1c313fe2e324\n      health: HEALTH_WARN\n              mon voyager1 is low on available space\n              1/3 mons down, quorum voyager1,voyager3\n\n    services:\n      mon: 3 daemons, quorum voyager1,voyager3, out of quorum: voyager2\n      mgr: voyager4(active)\n      osd: 24 osds: 24 up, 24 in\n\n.. 
code-block:: console\n\n  (mon-pod):/# ceph -s\n    cluster:\n      id:     fd366aef-b356-4fe7-9ca5-1c313fe2e324\n      health: HEALTH_WARN\n              mon voyager1 is low on available space\n              1/3 mons down, quorum voyager1,voyager2\n\n    services:\n      mon: 3 daemons, quorum voyager1,voyager2,voyager3\n      mgr: voyager4(active)\n      osd: 24 osds: 24 up, 24 in\n\nWe also monitored the status of the Monitor pod through ``kubectl get\npods -n ceph``, and the status of the pod (where a Monitor process is\nkilled) changed as follows: ``Running`` -> ``Error`` -> ``Running``\nand this recovery process takes about 24 seconds.\n\nCase: 2 out of 3 Monitor Processes are Down\n===========================================\n\nThis is to test a scenario when 2 out of 3 Monitor processes are down.\nTo bring down 2 Monitor processes (out of 3), we identify two Monitor\nprocesses and kill them from the 2 monitor hosts (not a pod).\n\nWe monitored the status of Ceph when the Monitor processes are killed\nand noted that the symptoms are similar to when 1 Monitor process is\nkilled:\n\n- It takes longer (about 1 minute) for the killed Monitor processes to\n  recover from ``down`` to ``up``.\n\n- The status of the pods (where the two Monitor processes are killed)\n  changed as follows: ``Running`` -> ``Error`` -> ``CrashLoopBackOff``\n  -> ``Running`` and this recovery process takes about 1 minute.\n\n\nCase: 3 out of 3 Monitor Processes are Down\n===========================================\n\nThis is to test a scenario when 3 out of 3 Monitor processes are down.\nTo bring down 3 Monitor processes (out of 3), we identify all 3\nMonitor processes and kill them from the 3 monitor hosts (not pods).\n\nWe monitored the status of Ceph Monitor pods and noted that the\nsymptoms are similar to when 1 or 2 Monitor processes are killed:\n\n.. 
We intentionally destroy a Monitor database by removing\n``/var/lib/openstack-helm/ceph/mon/mon/ceph-voyager3/store.db``.\n\nSymptom:\n--------\n\nA Ceph Monitor running on voyager3 (whose Monitor database is destroyed) becomes out of quorum,\nand the mon-pod's status stays in ``Running`` -> ``Error`` -> ``CrashLoopBackOff`` while it keeps restarting.
code-block:: console\n\n  (mon-pod):/# ceph -s\n    cluster:\n      id:     9d4d8c61-cf87-4129-9cef-8fbf301210ad\n      health: HEALTH_WARN\n              too few PGs per OSD (22 < min 30)\n              mon voyager1 is low on available space\n              1/3 mons down, quorum voyager1,voyager2\n\n    services:\n      mon: 3 daemons, quorum voyager1,voyager2, out of quorum: voyager3\n      mgr: voyager1(active), standbys: voyager3\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-65bb45dffc-cslr6=up:active}, 1 up:standby\n      osd: 24 osds: 24 up, 24 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   18 pools, 182 pgs\n      objects: 240 objects, 3359 bytes\n      usage:   2675 MB used, 44675 GB / 44678 GB avail\n      pgs:     182 active+clean\n\n.. code-block:: console\n\n  $ kubectl get pods -n ceph -o wide|grep ceph-mon\n  ceph-mon-4gzzw                             1/1       Running            0          6d        135.207.240.42    voyager2\n  ceph-mon-6bbs6                             0/1       CrashLoopBackOff   5          6d        135.207.240.43    voyager3\n  ceph-mon-qgc7p                             1/1       Running            0          6d        135.207.240.41    voyager1\n\nThe logs of the failed mon-pod shows the ceph-mon process cannot run as ``/var/lib/ceph/mon/ceph-voyager3/store.db`` does not exist.\n\n.. 
code-block:: console\n\n  $ kubectl logs ceph-mon-6bbs6 -n ceph\n  + ceph-mon --setuser ceph --setgroup ceph --cluster ceph -i voyager3 --inject-monmap /etc/ceph/monmap-ceph --keyring /etc/ceph/ceph.mon.keyring --mon-data /var/lib/ceph/mon/ceph-voyager3\n  2018-07-10 18:30:04.546200 7f4ca9ed4f00 -1 rocksdb: Invalid argument: /var/lib/ceph/mon/ceph-voyager3/store.db: does not exist (create_if_missing is false)\n  2018-07-10 18:30:04.546214 7f4ca9ed4f00 -1 error opening mon data directory at '/var/lib/ceph/mon/ceph-voyager3': (22) Invalid argument\n\nRecovery:\n---------\n\nRemove the entire ceph-mon directory on voyager3, and then Ceph will automatically\nrecreate the database by using the other ceph-mons' database.\n\n.. code-block:: console\n\n  $ sudo rm -rf /var/lib/openstack-helm/ceph/mon/mon/ceph-voyager3\n\n.. code-block:: console\n\n  (mon-pod):/# ceph -s\n    cluster:\n      id:     9d4d8c61-cf87-4129-9cef-8fbf301210ad\n      health: HEALTH_WARN\n              too few PGs per OSD (22 < min 30)\n              mon voyager1 is low on available space\n\n    services:\n      mon: 3 daemons, quorum voyager1,voyager2,voyager3\n      mgr: voyager1(active), standbys: voyager3\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-65bb45dffc-cslr6=up:active}, 1 up:standby\n      osd: 24 osds: 24 up, 24 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   18 pools, 182 pgs\n      objects: 240 objects, 3359 bytes\n      usage:   2675 MB used, 44675 GB / 44678 GB avail\n      pgs:     182 active+clean\n"
  },
  {
    "path": "doc/source/testing/ceph-resiliency/namespace-deletion.rst",
    "content": "===============================\n3.  Namespace deletion recovery\n===============================\n\nThis document captures steps to bring Ceph back up after deleting it's associated namespace.\n\n3.1  Setup\n==========\n\n.. note::\n   Follow OSH single node or multinode guide to bring up OSH envronment.\n\n3.2  Setup the OSH environment and check ceph  cluster health\n=============================================================\n\n.. note::\n   Ensure a healthy ceph cluster is running.\n\n.. code-block:: console\n\n   kubectl exec -n ceph ceph-mon-dtw6m -- ceph -s\n     cluster:\n       id:     fbaf9ce8-5408-4fce-9bfe-bf7fb938474c\n       health: HEALTH_OK\n\n     services:\n       mon: 5 daemons, quorum osh-1,osh-2,osh-5,osh-4,osh-3\n       mgr: osh-3(active), standbys: osh-4\n       mds: cephfs-1/1/1 up  {0=mds-ceph-mds-77dc68f476-jb5th=up:active}, 1 up:standby\n       osd: 15 osds: 15 up, 15 in\n\n     data:\n       pools:   18 pools, 182 pgs\n       objects: 21 objects, 2246 bytes\n       usage:   3025 MB used, 1496 GB / 1499 GB avail\n       pgs:     182 active+clean\n\n- Ceph cluster is in HEALTH_OK state with 5 MONs and 15 OSDs.\n\n3.3  Delete Ceph namespace\n==========================\n\n.. note::\n   Removing the namespace will delete all pods and secrets associated to Ceph.\n   !! DO NOT PROCEED WITH DELETING THE CEPH NAMESPACES ON A PRODUCTION ENVIRONMENT !!\n\n.. code-block:: console\n\n   CEPH_NAMESPACE=\"ceph\"\n   MON_POD=$(kubectl get pods --namespace=${CEPH_NAMESPACE} \\\n   --selector=\"application=ceph\" --selector=\"component=mon\" \\\n   --no-headers | awk '{ print $1; exit }')\n\n   kubectl exec --namespace=${CEPH_NAMESPACE} ${MON_POD} -- ceph status \\\n   | awk '/id:/{print $2}' | tee /tmp/ceph-fs-uuid.txt\n\n.. code-block:: console\n\n   kubectl delete namespace ${CEPH_NAMESPACE}\n\n.. 
code-block:: console\n\n   kubectl get pods --namespace ${CEPH_NAMESPACE} -o wide\n   No resources found.\n\n   kubectl get secrets --namespace ${CEPH_NAMESPACE}\n   No resources found.\n\n- Ceph namespace is currently deleted and all associated resources will be not found.\n\n3.4  Reinstall Ceph charts\n==========================\n\n.. note::\n   Instructions are specific to a multinode environment.\n   For AIO environments follow the development guide for reinstalling Ceph.\n\n.. code-block:: console\n\n   helm delete --purge ceph-openstack-config\n\n   for chart in $(helm list --namespace ${CEPH_NAMESPACE} | awk '/ceph-/{print $1}'); do\n     helm delete ${chart} --purge;\n   done\n\n.. note::\n   It will be normal not to see all PODs come back online during a reinstall.\n   Only the ceph-mon helm chart is required.\n\n.. code-block:: console\n\n   cd /opt/openstack-helm/\n   ./tools/deployment/multinode/030-ceph.sh\n\n3.5  Disable CephX authentication\n=================================\n\n.. note::\n   Wait until MON pods are running before proceeding here.\n\n.. code-block:: console\n\n   mkdir -p /tmp/ceph/ceph-templates /tmp/ceph/extracted-keys\n\n   kubectl get -n ${CEPH_NAMESPACE} configmaps ceph-mon-etc -o=jsonpath='{.data.ceph\\.conf}' > /tmp/ceph/ceph-mon.conf\n   sed '/\\[global\\]/a auth_client_required = none' /tmp/ceph/ceph-mon.conf | \\\n   sed '/\\[global\\]/a auth_service_required = none' | \\\n   sed '/\\[global\\]/a auth_cluster_required = none' > /tmp/ceph/ceph-mon-noauth.conf\n\n   kubectl --namespace ${CEPH_NAMESPACE} delete configmap ceph-mon-etc\n   kubectl --namespace ${CEPH_NAMESPACE} create configmap ceph-mon-etc --from-file=ceph.conf=/tmp/ceph/ceph-mon-noauth.conf\n\n   kubectl delete pod --namespace ${CEPH_NAMESPACE} -l application=ceph,component=mon\n\n.. note::\n   Wait until the MON pods are running before proceeding here.\n\n.. 
code-block:: console\n\n   MON_POD=$(kubectl get pods --namespace=${CEPH_NAMESPACE} \\\n   --selector=\"application=ceph\" --selector=\"component=mon\" \\\n   --no-headers | awk '{ print $1; exit }')\n\n   kubectl exec --namespace=${CEPH_NAMESPACE} ${MON_POD} -- ceph status\n\n- The Ceph cluster will not be healthy and in a HEALTH_WARN or HEALTH_ERR state.\n\n3.6  Replace key secrets with ones extracted from a Ceph MON\n============================================================\n\n.. code-block:: console\n\n   tee /tmp/ceph/ceph-templates/mon <<EOF\n   [mon.]\n     key = $(kubectl --namespace ${CEPH_NAMESPACE} exec ${MON_POD} -- bash -c \"ceph-authtool -l \\\"/var/lib/ceph/mon/ceph-\\$(hostname)/keyring\\\"\" | awk '/key =/ {print $NF}')\n     caps mon = \"allow *\"\n   EOF\n\n   for KEY in mds osd rgw; do\n     tee /tmp/ceph/ceph-templates/${KEY} <<EOF\n       [client.bootstrap-${KEY}]\n         key = $(kubectl --namespace ${CEPH_NAMESPACE} exec ${MON_POD} -- ceph auth get-key client.bootstrap-${KEY})\n         caps mon = \"allow profile bootstrap-${KEY}\"\n     EOF\n   done\n\n   tee /tmp/ceph/ceph-templates/admin <<EOF\n   [client.admin]\n     key = $(kubectl --namespace ${CEPH_NAMESPACE} exec ${MON_POD} -- ceph auth get-key client.admin)\n     auid = 0\n     caps mds = \"allow\"\n     caps mon = \"allow *\"\n     caps osd = \"allow *\"\n     caps mgr = \"allow *\"\n   EOF\n\n.. code-block:: console\n\n   tee /tmp/ceph/ceph-key-relationships <<EOF\n   mon ceph-mon-keyring ceph.mon.keyring mon.\n   mds ceph-bootstrap-mds-keyring ceph.keyring client.bootstrap-mds\n   osd ceph-bootstrap-osd-keyring ceph.keyring client.bootstrap-osd\n   rgw ceph-bootstrap-rgw-keyring ceph.keyring client.bootstrap-rgw\n   admin ceph-client-admin-keyring ceph.client.admin.keyring client.admin\n   EOF\n\n.. 
code-block:: console\n\n   while read CEPH_KEY_RELATIONS; do\n     KEY_RELATIONS=($(echo ${CEPH_KEY_RELATIONS}))\n     COMPONENT=${KEY_RELATIONS[0]}\n     KUBE_SECRET_NAME=${KEY_RELATIONS[1]}\n     KUBE_SECRET_DATA_KEY=${KEY_RELATIONS[2]}\n     KEYRING_NAME=${KEY_RELATIONS[3]}\n     DATA_PATCH=$(cat /tmp/ceph/ceph-templates/${COMPONENT} | envsubst | base64 -w0)\n     kubectl --namespace ${CEPH_NAMESPACE} patch secret ${KUBE_SECRET_NAME} -p \"{\\\"data\\\":{\\\"${KUBE_SECRET_DATA_KEY}\\\": \\\"${DATA_PATCH}\\\"}}\"\n   done < /tmp/ceph/ceph-key-relationships\n\n3.7  Re-enable CephX Authentication\n===================================\n\n.. code-block:: console\n\n   kubectl --namespace ${CEPH_NAMESPACE} delete configmap ceph-mon-etc\n   kubectl --namespace ${CEPH_NAMESPACE} create configmap ceph-mon-etc --from-file=ceph.conf=/tmp/ceph/ceph-mon.conf\n\n3.8  Reinstall Ceph charts\n==========================\n\n.. note::\n   Instructions are specific to a multinode environment.\n   For AIO environments follow the development guide for reinstalling Ceph.\n\n.. code-block:: console\n\n   for chart in $(helm list --namespace ${CEPH_NAMESPACE} | awk '/ceph-/{print $1}'); do\n     helm delete ${chart} --purge;\n   done\n\n.. code-block:: console\n\n   cd /opt/openstack-helm/\n   ./tools/deployment/multinode/030-ceph.sh\n   ./tools/deployment/multinode/040-ceph-ns-activate.sh\n\n.. code-block:: console\n\n   MON_POD=$(kubectl get pods --namespace=${CEPH_NAMESPACE} \\\n   --selector=\"application=ceph\" --selector=\"component=mon\" \\\n   --no-headers | awk '{ print $1; exit }')\n\n   kubectl exec --namespace=${CEPH_NAMESPACE} ${MON_POD} -- ceph status\n\n.. note::\n   AIO environments will need the following command to repair MDS standby failures.\n\n.. code-block:: console\n\n   kubectl exec --namespace=${CEPH_NAMESPACE} ${MON_POD} -- ceph fs set cephfs standby_count_wanted 0\n\n- Ceph pods are now running and cluster is healthy (HEALTH_OK).\n\n"
  },
  {
    "path": "doc/source/testing/ceph-resiliency/osd-failure.rst",
    "content": "===========\nOSD Failure\n===========\n\nTest Environment\n================\n\n- Cluster size: 4 host machines\n- Number of disks: 24 (= 6 disks per host * 4 hosts)\n- Kubernetes version: 1.9.3\n- Ceph version: 12.2.3\n- OpenStack-Helm commit: 28734352741bae228a4ea4f40bcacc33764221eb\n\nCase: OSD processes are killed\n==============================\n\nThis is to test a scenario when some of the OSDs are down.\n\nTo bring down 6 OSDs (out of 24), we identify the OSD processes and\nkill them from a storage host (not a pod).\n\n.. code-block:: console\n\n  $ ps -ef|grep /usr/bin/ceph-osd\n  ceph     44587 43680  1 18:12 ?        00:00:01 /usr/bin/ceph-osd --cluster ceph --osd-journal /dev/sdb5 -f -i 4 --setuser ceph --setgroup disk\n  ceph     44627 43744  1 18:12 ?        00:00:01 /usr/bin/ceph-osd --cluster ceph --osd-journal /dev/sdb2 -f -i 6 --setuser ceph --setgroup disk\n  ceph     44720 43927  2 18:12 ?        00:00:01 /usr/bin/ceph-osd --cluster ceph --osd-journal /dev/sdb6 -f -i 3 --setuser ceph --setgroup disk\n  ceph     44735 43868  1 18:12 ?        00:00:01 /usr/bin/ceph-osd --cluster ceph --osd-journal /dev/sdb1 -f -i 9 --setuser ceph --setgroup disk\n  ceph     44806 43855  1 18:12 ?        00:00:01 /usr/bin/ceph-osd --cluster ceph --osd-journal /dev/sdb4 -f -i 0 --setuser ceph --setgroup disk\n  ceph     44896 44011  2 18:12 ?        00:00:01 /usr/bin/ceph-osd --cluster ceph --osd-journal /dev/sdb3 -f -i 1 --setuser ceph --setgroup disk\n  root     46144 45998  0 18:13 pts/10   00:00:00 grep --color=auto /usr/bin/ceph-osd\n\n  $ sudo kill -9 44587 44627 44720 44735 44806 44896\n\n.. 
Case: An OSD pod is deleted\n===========================
code-block:: console\n\n  (mon-pod):/# ceph -s\n    cluster:\n      id:     fd366aef-b356-4fe7-9ca5-1c313fe2e324\n      health: HEALTH_WARN\n              mon voyager1 is low on available space\n\n    services:\n      mon: 3 daemons, quorum voyager1,voyager2,voyager3\n      mgr: voyager4(active)\n      osd: 24 osds: 24 up, 24 in\n\nWe also monitored the pod status through ``kubectl get pods -n ceph``\nduring this process. The deleted OSD pod status changed as follows:\n``Terminating`` -> ``Init:1/3`` -> ``Init:2/3`` -> ``Init:3/3`` ->\n``Running``, and this process takes about 90 seconds. The reason is\nthat Kubernetes automatically restarts OSD pods whenever they are\ndeleted.\n"
  },
  {
    "path": "doc/source/testing/ceph-resiliency/validate-object-replication.rst",
    "content": "===========================================\nCeph - Test object replication across hosts\n===========================================\n\nThis document captures steps  to  validate object replcation is happening across\nhosts or  not .\n\n\nSetup:\n======\n- Follow OSH single node or  multinode guide to bring up OSH envronment.\n\n\n\nStep 1:  Setup the OSH environment and check ceph  cluster health\n=================================================================\n\n.. note::\n  Make sure we have healthy ceph cluster running\n\n``Ceph status:``\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-5qn68 -- ceph -s\n    cluster:\n      id:     54d9af7e-da6d-4980-9075-96bb145db65c\n      health: HEALTH_OK\n\n    services:\n      mon: 3 daemons, quorum mnode1,mnode2,mnode3\n      mgr: mnode2(active), standbys: mnode3\n      mds: cephfs-1/1/1 up  {0=mds-ceph-mds-6f66956547-c25cx=up:active}, 1 up:standby\n      osd: 3 osds: 3 up, 3 in\n      rgw: 2 daemons active\n\n    data:\n      pools:   19 pools, 101 pgs\n      objects: 354 objects, 260 MB\n      usage:   77807 MB used, 70106 MB / 144 GB avail\n      pgs:     101 active+clean\n\n    io:\n      client:   48769 B/s wr, 0 op/s rd, 12 op/s wr\n\n\n- Ceph cluster is in HEALTH_OK state with 3 MONs and 3 OSDs.\n\n\nStep 2: Run validation script\n=============================\n\n.. note::\n  Exec into ceph mon pod and execute the  validation script  by giving pool name as\n  first argument, as shown below rbd is the pool name .\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ /tmp/checkObjectReplication.py rbd\n  Test object got replicated on these osds: [1, 0, 2]\n  Test object got replicated on these hosts: [u'mnode1', u'mnode2', u'mnode3']\n  Hosts hosting multiple copies of a placement groups are:[]\n\n- If  there  are any objects replicated  on same host then we will see them in the last\n  line of the script output\n"
  },
  {
    "path": "doc/source/testing/ceph-upgrade.rst",
    "content": "============\nCeph Upgrade\n============\n\nThis guide documents steps showing Ceph version upgrade. The main goal of this\ndocument is to demostrate Ceph chart update without downtime for OSH components.\n\nTest Scenario:\n==============\nUpgrade Ceph component version from ``12.2.4`` to ``12.2.5`` without downtime\nto OSH components.\n\nSetup:\n======\n- 3 Node (VM based) env.\n- Followed OSH multinode guide steps to setup nodes and install K8s cluster\n- Followed OSH multinode guide steps upto Ceph install\n\nPlan:\n=====\n1) Install Ceph charts (12.2.4) by updating Docker images in overrides.\n2) Install OSH components as per OSH multinode guide.\n3) Upgrade Ceph charts to version 12.2.5 by updating docker images in overrides.\n\n\nDocker Images:\n==============\n1) Ceph Luminous point release images for Ceph components\n\n.. code-block:: console\n\n  Ceph 12.2.4: ceph/daemon:master-0351083-luminous-ubuntu-16.04-x86_64\n  Ceph 12.2.5: ceph/daemon:master-a8d20ed-luminous-ubuntu-16.04-x86_64\n\n2) Ceph RBD provisioner docker images.\n\n.. code-block:: console\n\n  quay.io/external_storage/rbd-provisioner:v0.1.0\n  quay.io/external_storage/rbd-provisioner:v0.1.1\n\n3) Ceph Cephfs provisioner docker images.\n\n.. code-block:: console\n\n  quay.io/external_storage/cephfs-provisioner:v0.1.1\n  quay.io/external_storage/cephfs-provisioner:v0.1.2\n\nSteps:\n======\n\n.. note::\n  Follow all steps from OSH multinode guide with below changes.\n\n1) Install Ceph charts (version 12.2.4)\n\n\n  Update ceph install script ``./tools/deployment/multinode/030-ceph.sh``\n  to add ``images:`` section in overrides as shown below.\n\n.. note::\n  OSD count is set to 3 based on env setup.\n\n.. note::\n  Following is a partial part from script to show changes.\n\n.. 
``ceph_bootstrap``, ``ceph_config_helper`` and ``ceph_rbd_pool`` images
code-block:: console\n\n    + kubectl exec -n ceph ceph-mon-4c8xs -- ceph -s\n      cluster:\n        id:     39061799-d25e-4f3b-8c1a-a350e4c6d06c\n        health: HEALTH_OK\n\n      services:\n        mon: 3 daemons, quorum mnode1,mnode2,mnode3\n        mgr: mnode2(active), standbys: mnode3\n        mds: cephfs-1/1/1 up  {0=mds-ceph-mds-745576757f-4vdn4=up:active}, 1 up:standby\n        osd: 3 osds: 3 up, 3 in\n        rgw: 2 daemons active\n\n      data:\n        pools:   18 pools, 93 pgs\n        objects: 208 objects, 3359 bytes\n        usage:   72175 MB used, 75739 MB / 144 GB avail\n        pgs:     93 active+clean\n\n3) Check Ceph Pods\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl get pods -n ceph\n  NAME                                       READY     STATUS      RESTARTS   AGE\n  ceph-bootstrap-s4jkx                       0/1       Completed   0          10m\n  ceph-cephfs-client-key-generator-6bmzz     0/1       Completed   0          3m\n  ceph-mds-745576757f-4vdn4                  1/1       Running     0          6m\n  ceph-mds-745576757f-bxdcs                  1/1       Running     0          6m\n  ceph-mds-keyring-generator-f5lxf           0/1       Completed   0          10m\n  ceph-mgr-86bdc7c64b-7ptr4                  1/1       Running     0          6m\n  ceph-mgr-86bdc7c64b-xgplj                  1/1       Running     0          6m\n  ceph-mgr-keyring-generator-w7nxq           0/1       Completed   0          10m\n  ceph-mon-4c8xs                             1/1       Running     0          10m\n  ceph-mon-check-d85994946-zzwb4             1/1       Running     0          10m\n  ceph-mon-keyring-generator-jdgfw           0/1       Completed   0          10m\n  ceph-mon-kht8d                             1/1       Running     0          10m\n  ceph-mon-mkpmm                             1/1       Running     0          10m\n  ceph-osd-default-83945928-7jz4s            1/1       Running     0          8m\n  
ceph-osd-default-83945928-bh82j            1/1       Running     0          8m\n  ceph-osd-default-83945928-t9szk            1/1       Running     0          8m\n  ceph-osd-keyring-generator-6rg65           0/1       Completed   0          10m\n  ceph-rbd-pool-z8vlc                        0/1       Completed   0          6m\n  ceph-rbd-provisioner-84665cb84f-6s55r      1/1       Running     0          3m\n  ceph-rbd-provisioner-84665cb84f-chwhd      1/1       Running     0          3m\n  ceph-rgw-74559877-h56xs                    1/1       Running     0          6m\n  ceph-rgw-74559877-pfjr5                    1/1       Running     0          6m\n  ceph-rgw-keyring-generator-6rwct           0/1       Completed   0          10m\n  ceph-storage-keys-generator-bgj2t          0/1       Completed   0          10m\n  ingress-796d8cf8d6-nzrd2                   1/1       Running     0          11m\n  ingress-796d8cf8d6-qqvq9                   1/1       Running     0          11m\n  ingress-error-pages-54454dc79b-d5r5w       1/1       Running     0          11m\n  ingress-error-pages-54454dc79b-gfpqv       1/1       Running     0          11m\n\n\n4) Check the version of each Ceph component.\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-4c8xs -- ceph -v\n  ceph version 12.2.4 (52085d5249a80c5f5121a76d6288429f35e4e77b) luminous (stable)\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-osd-default-83945928-7jz4s -- ceph -v\n  ceph version 12.2.4 (52085d5249a80c5f5121a76d6288429f35e4e77b) luminous (stable)\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-mgr-86bdc7c64b-7ptr4 -- ceph -v\n  ceph version 12.2.4 (52085d5249a80c5f5121a76d6288429f35e4e77b) luminous (stable)\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-mds-745576757f-4vdn4 -- ceph -v\n  ceph version 12.2.4 (52085d5249a80c5f5121a76d6288429f35e4e77b) luminous (stable)\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-rgw-74559877-h56xs -- ceph -v\n  ceph version 12.2.4 (52085d5249a80c5f5121a76d6288429f35e4e77b) luminous (stable)\n\n5) Check which images Provisioners and Mon-Check PODs are using\n\n.. note::\n  Showing partial output from the kubectl describe command to show which image the Docker\n  container is using\n\n.. code-block:: console\n\n  ubuntu@mnode1:~$ kubectl describe pod -n ceph ceph-rbd-provisioner-84665cb84f-6s55r\n\n  Containers:\n    ceph-rbd-provisioner:\n      Container ID:  docker://383be3d653cecf4cbf0c3c7509774d39dce54102309f1f0bdb07cdc2441e5e47\n      Image:         quay.io/external_storage/rbd-provisioner:v0.1.0\n\n.. code-block:: console\n\n  ubuntu@mnode1:~$ kubectl describe pod -n ceph ceph-mon-check-d85994946-zzwb4\n\n  Containers:\n    ceph-mon:\n      Container ID:  docker://d5a3396f99704038ab8ef6bfe329013ed46472ebb8e26dddc140b621329f0f92\n      Image:         docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal\n\n\n6) Install Openstack charts\n\nContinue with the OSH multinode guide to install other Openstack charts.\n\n7) Capture Ceph pod statuses.\n\n.. 
code-block:: console\n\n  NAME                                       READY     STATUS      RESTARTS   AGE\n  ceph-bootstrap-s4jkx                       0/1       Completed   0          2h\n  ceph-cephfs-client-key-generator-6bmzz     0/1       Completed   0          2h\n  ceph-mds-745576757f-4vdn4                  1/1       Running     0          2h\n  ceph-mds-745576757f-bxdcs                  1/1       Running     0          2h\n  ceph-mds-keyring-generator-f5lxf           0/1       Completed   0          2h\n  ceph-mgr-86bdc7c64b-7ptr4                  1/1       Running     0          2h\n  ceph-mgr-86bdc7c64b-xgplj                  1/1       Running     0          2h\n  ceph-mgr-keyring-generator-w7nxq           0/1       Completed   0          2h\n  ceph-mon-4c8xs                             1/1       Running     0          2h\n  ceph-mon-check-d85994946-zzwb4             1/1       Running     0          2h\n  ceph-mon-keyring-generator-jdgfw           0/1       Completed   0          2h\n  ceph-mon-kht8d                             1/1       Running     0          2h\n  ceph-mon-mkpmm                             1/1       Running     0          2h\n  ceph-osd-default-83945928-7jz4s            1/1       Running     0          2h\n  ceph-osd-default-83945928-bh82j            1/1       Running     0          2h\n  ceph-osd-default-83945928-t9szk            1/1       Running     0          2h\n  ceph-osd-keyring-generator-6rg65           0/1       Completed   0          2h\n  ceph-rbd-pool-z8vlc                        0/1       Completed   0          2h\n  ceph-rbd-provisioner-84665cb84f-6s55r      1/1       Running     0          2h\n  ceph-rbd-provisioner-84665cb84f-chwhd      1/1       Running     0          2h\n  ceph-rgw-74559877-h56xs                    1/1       Running     0          2h\n  ceph-rgw-74559877-pfjr5                    1/1       Running     0          2h\n  ceph-rgw-keyring-generator-6rwct           0/1       Completed   0          2h\n  
ceph-storage-keys-generator-bgj2t          0/1       Completed   0          2h\n  ingress-796d8cf8d6-nzrd2                   1/1       Running     0          2h\n  ingress-796d8cf8d6-qqvq9                   1/1       Running     0          2h\n  ingress-error-pages-54454dc79b-d5r5w       1/1       Running     0          2h\n  ingress-error-pages-54454dc79b-gfpqv       1/1       Running     0          2h\n\n8) Capture Openstack pods statuses.\n\n.. code-block:: console\n\n  NAME                                           READY     STATUS    RESTARTS   AGE\n  cinder-api-67495cdffc-24fhs                    1/1       Running   0          51m\n  cinder-api-67495cdffc-kz5fn                    1/1       Running   0          51m\n  cinder-backup-65b7bd9b79-8n9pb                 1/1       Running   0          51m\n  cinder-scheduler-9ddbb7878-rbt4l               1/1       Running   0          51m\n  cinder-volume-75bf4cc9bd-6298x                 1/1       Running   0          51m\n  glance-api-68f6df4d5d-q84hs                    1/1       Running   0          1h\n  glance-api-68f6df4d5d-qbfwb                    1/1       Running   0          1h\n  ingress-7b4bc84cdd-84dtj                       1/1       Running   0          2h\n  ingress-7b4bc84cdd-ws45r                       1/1       Running   0          2h\n  ingress-error-pages-586c7f86d6-dlpm2           1/1       Running   0          2h\n  ingress-error-pages-586c7f86d6-w7cj2           1/1       Running   0          2h\n  keystone-api-7d9759db58-dz6kt                  1/1       Running   0          1h\n  keystone-api-7d9759db58-pvsc2                  1/1       Running   0          1h\n  libvirt-f7ngc                                  1/1       Running   0          24m\n  libvirt-gtjc7                                  1/1       Running   0          24m\n  libvirt-qmwf5                                  1/1       Running   0          24m\n  mariadb-ingress-84894687fd-m8fkr               1/1       Running   0          1h\n  
mariadb-ingress-error-pages-78fb865f84-c6th5   1/1       Running   0          1h\n  mariadb-server-0                               1/1       Running   0          1h\n  memcached-memcached-5db74ddfd5-qjgvz           1/1       Running   0          1h\n  neutron-dhcp-agent-default-9bpxc               1/1       Running   0          16m\n  neutron-l3-agent-default-47n7k                 1/1       Running   0          16m\n  neutron-metadata-agent-default-hp46c           1/1       Running   0          16m\n  neutron-ovs-agent-default-6sbtg                1/1       Running   0          16m\n  neutron-ovs-agent-default-nl8fr                1/1       Running   0          16m\n  neutron-ovs-agent-default-tvmc4                1/1       Running   0          16m\n  neutron-server-775c765d9f-cx2gk                1/1       Running   0          16m\n  neutron-server-775c765d9f-ll5ml                1/1       Running   0          16m\n  nova-api-metadata-557c68cb46-8f8d5             1/1       Running   1          16m\n  nova-api-osapi-7658bfd554-7fbtx                1/1       Running   0          16m\n  nova-api-osapi-7658bfd554-v7qcr                1/1       Running   0          16m\n  nova-compute-default-g2jbd                     1/1       Running   0          16m\n  nova-compute-default-ljcbc                     1/1       Running   0          16m\n  nova-compute-default-mr24c                     1/1       Running   0          16m\n  nova-conductor-64457cf995-lbv65                1/1       Running   0          16m\n  nova-conductor-64457cf995-zts48                1/1       Running   0          16m\n  nova-novncproxy-54467b9c66-vp49j               1/1       Running   0          16m\n  nova-scheduler-59647c6d9f-vm78p                1/1       Running   0          16m\n  openvswitch-db-cv47r                           1/1       Running   0          41m\n  openvswitch-db-dq7rc                           1/1       Running   0          41m\n  openvswitch-db-znp6l                           
1/1       Running   0          41m\n  openvswitch-vswitchd-8p2j5                     1/1       Running   0          41m\n  openvswitch-vswitchd-v9rrp                     1/1       Running   0          41m\n  openvswitch-vswitchd-wlgkx                     1/1       Running   0          41m\n  rabbitmq-rabbitmq-0                            1/1       Running   0          1h\n  rabbitmq-rabbitmq-1                            1/1       Running   0          1h\n  rabbitmq-rabbitmq-2                            1/1       Running   0          1h\n\n\n9) Upgrade Ceph charts to update version\n\nUse Ceph override file ``ceph.yaml`` that was generated previously and update\nimages section as below\n\n``cp /tmp/ceph.yaml ceph-update.yaml``\n\nUpdate, image section in new overrides ``ceph-update.yaml`` as shown below\n\n.. code-block:: yaml\n\n  images:\n    tags:\n      ceph_bootstrap: 'docker.io/ceph/daemon:master-0351083-luminous-ubuntu-16.04-x86_64'\n      ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal'\n      ceph_rbd_pool: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal'\n      ceph_mon_check: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal'\n      ceph_mon: 'docker.io/ceph/daemon:master-a8d20ed-luminous-ubuntu-16.04-x86_64'\n      ceph_osd: 'docker.io/ceph/daemon:master-a8d20ed-luminous-ubuntu-16.04-x86_64'\n      ceph_mds: 'docker.io/ceph/daemon:master-a8d20ed-luminous-ubuntu-16.04-x86_64'\n      ceph_mgr: 'docker.io/ceph/daemon:master-a8d20ed-luminous-ubuntu-16.04-x86_64'\n      ceph_rgw: 'docker.io/ceph/daemon:master-a8d20ed-luminous-ubuntu-16.04-x86_64'\n      ceph_cephfs_provisioner: 'quay.io/external_storage/cephfs-provisioner:v0.1.2'\n      ceph_rbd_provisioner: 'quay.io/external_storage/rbd-provisioner:v0.1.1'\n\n\n10) Update Ceph Mon chart with new overrides\n\n\n``helm upgrade ceph-mon ./ceph-mon --values=ceph-update.yaml``\n\n``series of console outputs:``\n\n.. 
code-block:: console\n\n  ceph-mon-4c8xs                             0/1       Terminating   0          2h\n  ceph-mon-check-d85994946-zzwb4             1/1       Running       0          2h\n  ceph-mon-keyring-generator-jdgfw           0/1       Completed     0          2h\n  ceph-mon-kht8d                             1/1       Running       0          2h\n  ceph-mon-mkpmm                             1/1       Running       0          2h\n\n.. code-block:: console\n\n  ceph-mon-7zxjs                             1/1       Running     1          4m\n  ceph-mon-84xt2                             1/1       Running     1          2m\n  ceph-mon-check-d85994946-zzwb4             1/1       Running     0          2h\n  ceph-mon-fsrv4                             1/1       Running     1          6m\n  ceph-mon-keyring-generator-jdgfw           0/1       Completed   0          2h\n\n\n``Results:`` Mon pods got updated one by one (rolling updates). Each Mon pod\ngot respawned and was in 1/1 running state before the next Mon pod got updated.\nEach Mon pod got restarted. Other ceph pods were not affected by this update.\nNo interruption to OSH pods.\n\n\n11) Update Ceph OSD chart with new overrides:\n\n``helm upgrade ceph-osd ./ceph-osd --values=ceph-update.yaml``\n\n``series of console outputs:``\n\n.. code-block:: console\n\n  ceph-osd-default-83945928-7jz4s            0/1       Terminating   0          2h\n  ceph-osd-default-83945928-bh82j            1/1       Running       0          2h\n  ceph-osd-default-83945928-t9szk            1/1       Running       0          2h\n  ceph-osd-keyring-generator-6rg65           0/1       Completed     0          2h\n\n.. 
code-block:: console\n\n  ceph-osd-default-83945928-l84tl            1/1       Running     0          9m\n  ceph-osd-default-83945928-twzmk            1/1       Running     0          6m\n  ceph-osd-default-83945928-wxpmh            1/1       Running     0          11m\n  ceph-osd-keyring-generator-6rg65           0/1       Completed   0          2h\n\n``Results:`` Rolling updates (one pod at a time). Other ceph pods are running.\nNo interruption to OSH pods.\n\n\n12) Update Ceph Client chart with new overrides:\n\n``helm upgrade ceph-client ./ceph-client --values=ceph-update.yaml``\n\n.. code-block:: console\n\n  ceph-mds-5fdcb5c64c-t9nmb                  0/1       Init:0/2      0          11s\n  ceph-mds-745576757f-4vdn4                  1/1       Running       0          2h\n  ceph-mds-745576757f-bxdcs                  1/1       Running       0          2h\n  ceph-mgr-86bdc7c64b-7ptr4                  1/1       Terminating   0          2h\n  ceph-mgr-86bdc7c64b-xgplj                  0/1       Terminating   0          2h\n  ceph-rgw-57c68b7cd5-vxcc5                  0/1       Init:1/3      0          11s\n  ceph-rgw-74559877-h56xs                    1/1       Running       0          2h\n  ceph-rgw-74559877-pfjr5                    1/1       Running       0          2h\n\n.. code-block:: console\n\n  ceph-mds-5fdcb5c64c-c52xq                  1/1       Running     0          2m\n  ceph-mds-5fdcb5c64c-t9nmb                  1/1       Running     0          2m\n  ceph-mgr-654f97cbfd-9kcvb                  1/1       Running     0          1m\n  ceph-mgr-654f97cbfd-gzb7k                  1/1       Running     0          1m\n  ceph-rgw-57c68b7cd5-vxcc5                  1/1       Running     0          2m\n  ceph-rgw-57c68b7cd5-zmdqb                  1/1       Running     0          2m\n\n``Results:`` Rolling updates (one pod at a time). 
Other ceph pods are running.\nNo interruption to OSH pods.\n\n13) Update Ceph Provisioners chart with new overrides:\n\n``helm upgrade ceph-provisioners ./ceph-provisioners --values=ceph-update.yaml``\n\n.. code-block:: console\n\n  ceph-rbd-provisioner-84665cb84f-6s55r      0/1       Terminating   0          2h\n  ceph-rbd-provisioner-84665cb84f-chwhd      0/1       Terminating   0          2h\n\n\n.. code-block:: console\n\n  ceph-rbd-provisioner-5bfb577ffd-b7tkx      1/1       Running     0          1m\n  ceph-rbd-provisioner-5bfb577ffd-m6gg6      1/1       Running     0          1m\n\n``Results:`` All provisioner pods got terminated at once (same time). Other ceph\npods are running. No interruption to OSH pods.\n\n14) Capture final Ceph pod statuses:\n\n.. code-block:: console\n\n  ceph-bootstrap-s4jkx                       0/1       Completed   0          2h\n  ceph-cephfs-client-key-generator-6bmzz     0/1       Completed   0          2h\n  ceph-mds-5fdcb5c64c-c52xq                  1/1       Running     0          8m\n  ceph-mds-5fdcb5c64c-t9nmb                  1/1       Running     0          8m\n  ceph-mds-keyring-generator-f5lxf           0/1       Completed   0          2h\n  ceph-mgr-654f97cbfd-9kcvb                  1/1       Running     0          8m\n  ceph-mgr-654f97cbfd-gzb7k                  1/1       Running     0          8m\n  ceph-mgr-keyring-generator-w7nxq           0/1       Completed   0          2h\n  ceph-mon-7zxjs                             1/1       Running     1          27m\n  ceph-mon-84xt2                             1/1       Running     1          24m\n  ceph-mon-check-d85994946-zzwb4             1/1       Running     0          2h\n  ceph-mon-fsrv4                             1/1       Running     1          29m\n  ceph-mon-keyring-generator-jdgfw           0/1       Completed   0          2h\n  ceph-osd-default-83945928-l84tl            1/1       Running     0          19m\n  ceph-osd-default-83945928-twzmk            1/1     
  Running     0          16m\n  ceph-osd-default-83945928-wxpmh            1/1       Running     0          21m\n  ceph-osd-keyring-generator-6rg65           0/1       Completed   0          2h\n  ceph-rbd-pool-z8vlc                        0/1       Completed   0          2h\n  ceph-rbd-provisioner-5bfb577ffd-b7tkx      1/1       Running     0          2m\n  ceph-rbd-provisioner-5bfb577ffd-m6gg6      1/1       Running     0          2m\n  ceph-rgw-57c68b7cd5-vxcc5                  1/1       Running     0          8m\n  ceph-rgw-57c68b7cd5-zmdqb                  1/1       Running     0          8m\n  ceph-rgw-keyring-generator-6rwct           0/1       Completed   0          2h\n  ceph-storage-keys-generator-bgj2t          0/1       Completed   0          2h\n  ingress-796d8cf8d6-nzrd2                   1/1       Running     0          2h\n  ingress-796d8cf8d6-qqvq9                   1/1       Running     0          2h\n  ingress-error-pages-54454dc79b-d5r5w       1/1       Running     0          2h\n  ingress-error-pages-54454dc79b-gfpqv       1/1       Running     0          2h\n\n15) Capture final Openstack pod statuses:\n\n.. 
code-block:: console\n\n  cinder-api-67495cdffc-24fhs                    1/1       Running   0          1h\n  cinder-api-67495cdffc-kz5fn                    1/1       Running   0          1h\n  cinder-backup-65b7bd9b79-8n9pb                 1/1       Running   0          1h\n  cinder-scheduler-9ddbb7878-rbt4l               1/1       Running   0          1h\n  cinder-volume-75bf4cc9bd-6298x                 1/1       Running   0          1h\n  glance-api-68f6df4d5d-q84hs                    1/1       Running   0          2h\n  glance-api-68f6df4d5d-qbfwb                    1/1       Running   0          2h\n  ingress-7b4bc84cdd-84dtj                       1/1       Running   0          2h\n  ingress-7b4bc84cdd-ws45r                       1/1       Running   0          2h\n  ingress-error-pages-586c7f86d6-dlpm2           1/1       Running   0          2h\n  ingress-error-pages-586c7f86d6-w7cj2           1/1       Running   0          2h\n  keystone-api-7d9759db58-dz6kt                  1/1       Running   0          2h\n  keystone-api-7d9759db58-pvsc2                  1/1       Running   0          2h\n  libvirt-f7ngc                                  1/1       Running   0          1h\n  libvirt-gtjc7                                  1/1       Running   0          1h\n  libvirt-qmwf5                                  1/1       Running   0          1h\n  mariadb-ingress-84894687fd-m8fkr               1/1       Running   0          2h\n  mariadb-ingress-error-pages-78fb865f84-c6th5   1/1       Running   0          2h\n  mariadb-server-0                               1/1       Running   0          2h\n  memcached-memcached-5db74ddfd5-qjgvz           1/1       Running   0          2h\n  neutron-dhcp-agent-default-9bpxc               1/1       Running   0          52m\n  neutron-l3-agent-default-47n7k                 1/1       Running   0          52m\n  neutron-metadata-agent-default-hp46c           1/1       Running   0          52m\n  neutron-ovs-agent-default-6sbtg        
        1/1       Running   0          52m\n  neutron-ovs-agent-default-nl8fr                1/1       Running   0          52m\n  neutron-ovs-agent-default-tvmc4                1/1       Running   0          52m\n  neutron-server-775c765d9f-cx2gk                1/1       Running   0          52m\n  neutron-server-775c765d9f-ll5ml                1/1       Running   0          52m\n  nova-api-metadata-557c68cb46-8f8d5             1/1       Running   1          52m\n  nova-api-osapi-7658bfd554-7fbtx                1/1       Running   0          52m\n  nova-api-osapi-7658bfd554-v7qcr                1/1       Running   0          52m\n  nova-compute-default-g2jbd                     1/1       Running   0          52m\n  nova-compute-default-ljcbc                     1/1       Running   0          52m\n  nova-compute-default-mr24c                     1/1       Running   0          52m\n  nova-conductor-64457cf995-lbv65                1/1       Running   0          52m\n  nova-conductor-64457cf995-zts48                1/1       Running   0          52m\n  nova-novncproxy-54467b9c66-vp49j               1/1       Running   0          52m\n  nova-scheduler-59647c6d9f-vm78p                1/1       Running   0          52m\n  openvswitch-db-cv47r                           1/1       Running   0          1h\n  openvswitch-db-dq7rc                           1/1       Running   0          1h\n  openvswitch-db-znp6l                           1/1       Running   0          1h\n  openvswitch-vswitchd-8p2j5                     1/1       Running   0          1h\n  openvswitch-vswitchd-v9rrp                     1/1       Running   0          1h\n  openvswitch-vswitchd-wlgkx                     1/1       Running   0          1h\n  rabbitmq-rabbitmq-0                            1/1       Running   0          2h\n  rabbitmq-rabbitmq-1                            1/1       Running   0          2h\n  rabbitmq-rabbitmq-2                            1/1       Running   0          2h\n\n16) 
Confirm each Ceph component's version.\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-fsrv4 -- ceph -v\n  ceph version 12.2.5 (cad919881333ac92274171586c827e01f554a70a) luminous (stable)\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-osd-default-83945928-l84tl -- ceph -v\n  ceph version 12.2.5 (cad919881333ac92274171586c827e01f554a70a) luminous (stable)\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-rgw-57c68b7cd5-vxcc5 -- ceph -v\n  ceph version 12.2.5 (cad919881333ac92274171586c827e01f554a70a) luminous (stable)\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-mgr-654f97cbfd-gzb7k -- ceph -v\n  ceph version 12.2.5 (cad919881333ac92274171586c827e01f554a70a) luminous (stable)\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl exec -n ceph ceph-mds-5fdcb5c64c-c52xq -- ceph -v\n  ceph version 12.2.5 (cad919881333ac92274171586c827e01f554a70a) luminous (stable)\n\n17) Check which images Provisioners and Mon-Check PODs are using\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl describe pod -n ceph ceph-rbd-provisioner-5bfb577ffd-b7tkx\n\n  Containers:\n    ceph-rbd-provisioner:\n      Container ID:  docker://55b18b3400e8753f49f1343ee918a308ed1760816a1ce9797281dbfe3c5f9671\n      Image:         quay.io/external_storage/rbd-provisioner:v0.1.1\n\n.. code-block:: console\n\n  ubuntu@mnode1:/opt/openstack-helm$ kubectl describe pod -n ceph ceph-mon-check-d85994946-zzwb4\n\n  Containers:\n    ceph-mon:\n      Container ID:  docker://d5a3396f99704038ab8ef6bfe329013ed46472ebb8e26dddc140b621329f0f92\n      Image:         docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal\n\nConclusion:\n===========\nCeph can be upgraded without downtime for Openstack components in a multinode environment.\n"
  },
  {
    "path": "doc/source/testing/helm-tests.rst",
    "content": "==========\nHelm Tests\n==========\n\nEvery OpenStack-Helm chart should include any required Helm tests necessary to\nprovide a sanity check for the OpenStack service.  Information on using the Helm\ntesting framework can be found in the Helm repository_.  Currently, the Rally\ntesting framework is used to provide these checks for the core services.  The\nKeystone Helm test template can be used as a reference, and can be found here_.\n\n.. _repository: https://github.com/kubernetes/helm/blob/master/docs/chart_tests.md\n\n.. _here: https://github.com/openstack/openstack-helm/blob/master/keystone/templates/pod-rally-test.yaml\n\n\nTesting Expectations\n--------------------\n\nAny templates for Helm tests submitted should follow the philosophies applied in\nthe other templates.  These include: use of overrides where appropriate, use of\nendpoint lookups and other common functionality in helm-toolkit, and mounting\nany required scripting templates via the configmap-bin template for the service\nchart.  If Rally tests are not appropriate or adequate for a service chart, any\nadditional tests should be documented appropriately and adhere to the same\nexpectations.\n\nRunning Tests\n-------------\n\nAny Helm tests associated with a chart can be run by executing:\n\n::\n\n    helm test <helm-release-name>\n\nThe output of the Helm tests can be seen by looking at the logs of the pod\ncreated by the Helm tests.  These logs can be viewed with:\n\n::\n\n    kubectl logs <test-pod-name> -n <namespace>\n\nAdditional information on Helm tests for OpenStack-Helm and how to execute\nthese tests locally via the scripts used in the gate can be found in the\ngates_ directory.\n\n.. _gates: https://github.com/openstack/openstack-helm/blob/master/tools/gate/\n\n\nAdding Tests\n------------\n\nAll tests should be added to the gates during development, and are required for\nany new service charts prior to merging.  
All Helm tests should be included as\npart of the deployment script.  An example of this can be seen in\nthis script_.\n\n.. _script: https://github.com/openstack/openstack-helm/blob/9d4f9862ca07f08005f9bdb4e6d58ad770fa4178/tools/deployment/multinode/080-keystone.sh#L32\n"
  },
  {
    "path": "doc/source/testing/index.rst",
    "content": "=======\nTesting\n=======\n\n.. toctree::\n   :maxdepth: 2\n\n   helm-tests\n   ceph-resiliency/index\n   ceph-upgrade\n   ceph-node-resiliency\n"
  },
  {
    "path": "doc/source/troubleshooting/ceph.rst",
    "content": "Backing up a PVC\n^^^^^^^^^^^^^^^^\n\nBacking up a PVC stored in Ceph is fairly straightforward. In this example we\nuse the PVC ``mysql-data-mariadb-server-0``, but this will also\napply to any other services using PVCs, e.g. RabbitMQ, Postgres.\n\n\n.. code-block:: shell\n\n    #  get all required details\n    NS_NAME=\"openstack\"\n    PVC_NAME=\"mysql-data-mariadb-server-0\"\n    # you can check this by running  kubectl get pvc -n ${NS_NAME}\n\n    PV_NAME=\"$(kubectl get -n ${NS_NAME} pvc \"${PVC_NAME}\" --no-headers | awk '{ print $3 }')\"\n    RBD_NAME=\"$(kubectl get pv \"${PV_NAME}\" -o json | jq -r '.spec.rbd.image')\"\n    MON_POD=$(kubectl get pods \\\n      --namespace=ceph \\\n      --selector=\"application=ceph\" \\\n      --selector=\"component=mon\" \\\n      --no-headers | awk '{ print $1; exit }')\n\n    # copy admin keyring from ceph mon to host node\n\n    kubectl exec -it ${MON_POD} -n ceph -- cat /etc/ceph/ceph.client.admin.keyring > /etc/ceph/ceph.client.admin.keyring\n    sudo kubectl get cm -n ceph ceph-etc -o json|jq -j  .data[] > /etc/ceph/ceph.conf\n\n    export CEPH_MON_NAME=\"ceph-mon-discovery.ceph.svc.cluster.local\"\n\n    # create snapshot and export to a file\n\n    rbd snap create rbd/${RBD_NAME}@snap1 -m ${CEPH_MON_NAME}\n    rbd snap list rbd/${RBD_NAME} -m ${CEPH_MON_NAME}\n\n    # Export the snapshot and compress, make sure we have enough space on the host to accommodate the big files that we are working with.\n\n    # a. if we have enough space on host\n\n    rbd export rbd/${RBD_NAME}@snap1 /backup/${RBD_NAME}.img -m ${CEPH_MON_NAME}\n    cd /backup\n    time xz -0vk --threads=0  /backup/${RBD_NAME}.img\n\n    # b. if we have less space on host we can directly export  and compress in single command\n\n    rbd export rbd/${RBD_NAME}@snap1 -m ${CEPH_MON_NAME} - | xz  -0v --threads=0 >  /backup/${RBD_NAME}.img.xz\n\n\nRestoring is just as straightforward. Once the workload consuming the device has\nbeen stopped, and the raw RBD device removed, the following will import the\nbackup and create a device:\n\n.. code-block:: shell\n\n    cd /backup\n    unxz -k ${RBD_NAME}.img.xz\n    rbd import /backup/${RBD_NAME}.img rbd/${RBD_NAME} -m ${CEPH_MON_NAME}\n\nOnce this has been done the workload can be restarted.\n"
  },
  {
    "path": "doc/source/troubleshooting/database.rst",
    "content": "====================\nDatabase Deployments\n====================\n\nThis guide is to help users debug any general storage issues when\ndeploying Charts in this repository.\n\nGalera Cluster\n==============\n\nTo test MariaDB, do the following:\n\n::\n\n    admin@kubenode01:~/projects/openstack-helm$ kubectl exec mariadb-0 -it -n openstack -- mysql -h mariadb.openstack -uroot -ppassword -e 'show databases;'\n    +--------------------+\n    | Database           |\n    +--------------------+\n    | information_schema |\n    | keystone           |\n    | mysql              |\n    | performance_schema |\n    +--------------------+\n    admin@kubenode01:~/projects/openstack-helm$\n"
  },
  {
    "path": "doc/source/troubleshooting/index.rst",
    "content": "===============\nTroubleshooting\n===============\n\nSometimes things go wrong. These guides will help you solve many common\nissues with the following:\n\n.. toctree::\n   :maxdepth: 2\n\n   database\n   persistent-storage\n   ubuntu-hwe-kernel\n   ceph\n   migrate-ceph-to-rook\n\nGetting help\n============\n\nChannels\n--------\n\n* Join us on `IRC <irc://chat.oftc.net/openstack-helm>`_:\n  #openstack-helm on oftc\n* Join us on `Slack <http://slack.k8s.io/>`_ - #openstack-helm\n\nBugs and Feature requests\n-------------------------\n\nPlease, refer to\n`Contribution guidelines <../contributor/contributing.html>`_.\n"
  },
  {
    "path": "doc/source/troubleshooting/migrate-ceph-to-rook.rst",
    "content": "Migrating Ceph to Rook\n^^^^^^^^^^^^^^^^^^^^^^\n\nIt may be necessary or desired to migrate an existing `Ceph`_ cluster that was\noriginally deployed using the Ceph charts in `openstack-helm`_ to be\nmanaged by the Rook operator moving forward. This operation is not a supported\n`Rook`_ feature, but it is possible to achieve.\n\nThe procedure must completely stop and start all of the Ceph pods a few times\nduring the migration process and is therefore disruptive to Ceph operations,\nbut the result is a Ceph cluster deployed and managed by the Rook operator that\nmaintains the same cluster FSID as the original with all OSD data preserved.\n\nThe steps involved in migrating a legacy openstack-helm Ceph cluster to Rook\nare based on the Rook `Troubleshooting`_ documentation and are outlined below.\n\n#. Retrieve the cluster FSID, the name and IP address of an existing ceph-mon\n   host that contains a healthy monitor store, and the numbers of existing\n   ceph-mon and ceph-mgr pods from the existing Ceph cluster and save\n   this information for later.\n#. Rename CephFS pools so that they use the naming convention expected by Rook.\n   CephFS pools names are not customizable with Rook, so new metadata and data\n   pools will be created for any filesystems deployed by Rook if the existing\n   pools are not named as expected. Pools must be named\n   \"<filesystem name>-metadata\" and \"<filesystem name>-data\" in order for Rook to\n   use existing CephFS pools.\n#. Delete Ceph resources deployed via the openstack-helm charts,\n   uninstall the charts, and remove Ceph node labels.\n#. Add the Rook Helm repository and deploy the Rook operator and a minimal Ceph\n   cluster using the Rook Helm charts. The cluster will have a new FSID and will\n   not include any OSDs because the OSD disks are all initialized in the old Ceph\n   cluster and that will be recognized.\n#. 
Save the Ceph monitor keyring, host, and IP address, as well as the IP\n   address of the new monitor itself, for later use.\n#. Stop the Rook operator by scaling the rook-ceph-operator deployment in the\n   Rook operator namespace to zero replicas and destroy the newly-deployed Ceph\n   cluster by deleting all deployments in the Rook Ceph cluster namespace except\n   for rook-ceph-tools.\n#. Copy the store from an old monitor saved in the first step to the new\n   monitor and update its keyring with the key saved from the new monitor.\n#. Edit the monmap in the copied monitor store using monmaptool to remove all\n   of the old monitors from the monmap and add a single new one using the new\n   monitor's IP address.\n#. Edit the rook-ceph-mon secret in the Rook Ceph cluster namespace and\n   overwrite the base64-encoded FSID with the base64 encoding of the FSID from the\n   old Ceph cluster. Make sure the encoding includes the FSID only, no whitespace\n   or newline characters.\n#. Edit the rook-config-override configmap in the Rook Ceph cluster namespace\n   and set auth_supported, auth_client_required, auth_service_required, and\n   auth_cluster_required all to none in the global config section to disable\n   authentication in the Ceph cluster.\n#. Scale the rook-ceph-operator deployment back to one replica to deploy the\n   Ceph cluster again. This time, the cluster will have the old cluster's FSID\n   and the OSD pods will come up in the new cluster with their data intact.\n#. Using 'ceph auth import' from the rook-ceph-tools pod, import the\n   [client.admin] portion of the key saved from the new monitor from its initial\n   Rook deployment.\n#. Edit the rook-config-override configmap again and remove the settings\n   previously added to disable authentication. This will re-enable authentication\n   when Ceph daemons are restarted.\n#. 
Scale the rook-ceph-operator deployment to zero replicas and delete the Ceph\n   cluster deployments again to destroy the Ceph cluster one more time.\n#. When everything has terminated, immediately scale the rook-ceph-operator\n   deployment back to one to deploy the Ceph cluster one final time.\n#. After the Ceph cluster has been deployed again and all of the daemons are\n   running (with authentication enabled now), edit the deployed cephcluster\n   resource in the Rook Ceph cluster namespace to set the mon and mgr counts to\n   their original values saved in the first step.\n#. Now you have a Ceph cluster, deployed and managed by Rook, with the original\n   FSID and all of its data intact, with the same number of monitors and managers\n   that existed in the previous deployment.\n\nThere is a rudimentary `script`_ provided that automates this process. It\nisn't meant to be a complete solution and isn't supported as such. It is simply\nan example that is known to work for some test implementations.\n\nThe script makes use of environment variables to tell it which Rook release and\nCeph release to deploy, in which Kubernetes namespaces to deploy the Rook\noperator and Ceph cluster, and paths to YAML files that contain the necessary\ndefinitions for the Rook operator and Rook Ceph cluster. All of these have\ndefault values and are not required to be set, but it will likely be necessary\nat least to define paths to the YAML files required to deploy the Rook operator\nand Ceph cluster. Please refer to the comments near the top of the script for\nmore information about utilizing these environment variables.\n\nThe Ceph cluster definition provided to Rook should match the existing Ceph\ncluster as closely as possible. Otherwise, the migration may not migrate Ceph\ncluster resources as expected. 
The migration of deployed Ceph resources is\nunique to each deployment, so sample definitions are not provided here.\n\nMigrations using this procedure and/or script are very delicate and will\nrequire a lot of testing prior to being implemented in production. This is a\nrisky operation even with testing and should be performed very carefully.\n\n.. _Ceph: https://ceph.io\n.. _openstack-helm: https://opendev.org/openstack/openstack-helm\n.. _Rook: https://rook.io\n.. _Troubleshooting: https://rook.io/docs/rook/latest-release/Troubleshooting/disaster-recovery/#adopt-an-existing-rook-ceph-cluster-into-a-new-kubernetes-cluster\n.. _script: https://opendev.org/openstack/openstack-helm/src/tools/deployment/ceph/migrate-to-rook-ceph.sh\n"
  },
  {
    "path": "doc/source/troubleshooting/persistent-storage.rst",
    "content": "==================\nPersistent Storage\n==================\n\nThis guide is to help users debug any general storage issues when\ndeploying charts in this repository.\n\nCeph\n====\n\nCeph Deployment Status\n~~~~~~~~~~~~~~~~~~~~~~\n\nFirst, we want to validate that Ceph is working correctly. This\ncan be done with the following Ceph command:\n\n::\n\n    admin@kubenode01:~$ MON_POD=$(kubectl get --no-headers pods -n=ceph -l=\"application=ceph,component=mon\" | awk '{ print $1; exit }')\n    admin@kubenode01:~$ kubectl exec -n ceph ${MON_POD} -- ceph -s\n        cluster:\n          id:     06a191c7-81bd-43f3-b5dd-3d6c6666af71\n          health: HEALTH_OK\n\n        services:\n          mon: 1 daemons, quorum att.port.direct\n          mgr: att.port.direct(active)\n          mds: cephfs-1/1/1 up  {0=mds-ceph-mds-68c9c76d59-zqc55=up:active}\n          osd: 1 osds: 1 up, 1 in\n          rgw: 1 daemon active\n\n        data:\n          pools:   11 pools, 208 pgs\n          objects: 352 objects, 464 MB\n          usage:   62467 MB used, 112 GB / 173 GB avail\n          pgs:     208 active+clean\n\n        io:\n          client:   253 B/s rd, 39502 B/s wr, 1 op/s rd, 8 op/s wr\n    admin@kubenode01:~$\n\nUse one of your Ceph Monitors to check the status of the cluster. 
A\ncouple of things to note above; our health is ``HEALTH_OK``, we have 1\nmon, we've established a quorum, and we can see that all of our OSDs\nare up and in the OSD map.\n\nPVC Preliminary Validation\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nBefore proceeding, it is important to ensure that you have deployed a\nclient key in the namespace you wish to fulfill ``PersistentVolumeClaims``.\nTo verify that your deployment namespace has a client key:\n\n::\n\n    admin@kubenode01: $ kubectl get secret -n openstack pvc-ceph-client-key\n    NAME                  TYPE                DATA      AGE\n    pvc-ceph-client-key   kubernetes.io/rbd   1         8h\n\nWithout this, your RBD-backed PVCs will never reach the ``Bound`` state.  For\nmore information, see how to `activate namespace for ceph <../install/multinode.html#activating-control-plane-namespace-for-ceph>`_.\n\nNote: This step is not relevant for PVCs within the same namespace Ceph\nwas deployed.\n\nCeph Validating PVC Operation\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nTo validate persistent volume claim (PVC) creation, we've placed a test\nmanifest `here <https://raw.githubusercontent.com/openstack/openstack-helm/master/tests/pvc-test.yaml>`_.\nDeploy this manifest and verify the job completes successfully.\n\nCeph Validating StorageClass\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nNext we can look at the storage class, to make sure that it was created\ncorrectly:\n\n::\n\n    admin@kubenode01:~$ kubectl describe storageclass/general\n    Name:            general\n    IsDefaultClass:  No\n    Annotations:     <none>\n    Provisioner:     ceph.com/rbd\n    Parameters:      adminId=admin,adminSecretName=pvc-ceph-conf-combined-storageclass,adminSecretNamespace=ceph,imageFeatures=layering,imageFormat=2,monitors=ceph-mon.ceph.svc.cluster.local:6789,pool=rbd,userId=admin,userSecretName=pvc-ceph-client-key\n    ReclaimPolicy:   Delete\n    Events:          <none>\n    admin@kubenode01:~$\n\nThe parameters are what we're looking for here. 
If we see parameters\npassed to the StorageClass correctly, we will see the\n``ceph-mon.ceph.svc.cluster.local:6789`` hostname/port, things like ``userId``,\nand appropriate secrets used for volume claims.\n"
  },
  {
    "path": "doc/source/troubleshooting/ubuntu-hwe-kernel.rst",
    "content": "=================\nUbuntu HWE Kernel\n=================\n\nTo make use of CephFS in Ubuntu the HWE Kernel is required, until the issue\ndescribed `here <https://github.com/kubernetes-incubator/external-storage/issues/345>`_\nis fixed.\n\nInstallation\n============\n\nTo deploy the HWE kernel, prior to deploying Kubernetes and OpenStack-Helm\nthe following commands should be run on each node:\n\n.. code-block:: shell\n\n    #!/bin/bash\n    sudo -H apt-get update\n    sudo -H apt-get install -y linux-generic-hwe-16.04\n    sudo -H reboot now\n"
  },
  {
    "path": "doc/source/upgrade/index.rst",
    "content": "Upgrade\n=======\n\nContents:\n\n.. toctree::\n   :maxdepth: 2\n\n   multiple-osd-releases\n"
  },
  {
    "path": "doc/source/upgrade/multiple-osd-releases.rst",
    "content": "================================================================\nCeph - upgrade monolithic ceph-osd chart to multiple ceph charts\n================================================================\n\nThis document captures the steps to move from installed monolithic ceph-osd chart\nto multiple ceph osd charts.\n\nThis work will bring flexibility on site update as we will have more control on osds.\n\n\nInstall single ceph-osd chart:\n==============================\n\nstep 1: Setup:\n==============\n\n- Follow OSH single node or  multinode guide to bring up OSH environment.\n\n.. note::\n  we will have single ceph osd chart and here are the override values for ceph disks\n    osd:\n      - data:\n          type: block-logical\n          location: /dev/vdb\n        journal:\n          type: block-logical\n          location:  /dev/vda1\n      - data:\n          type: block-logical\n          location: /dev/vdc\n        journal:\n          type: block-logical\n          location:  /dev/vda2\n\n\nStep 2:  Setup the OSH environment and check ceph  cluster health\n=================================================================\n\n.. note::\n  Make sure we have healthy ceph cluster running\n\n``Ceph status:``\n\n.. code-block:: console\n\n  ubuntu@k8smaster:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-5qn68 -- ceph -s\n      cluster:\n        id:     61a4e07f-8b4a-4c47-8fc7-a0e7345ac0b0\n        health: HEALTH_OK\n\n      services:\n         mon: 3 daemons, quorum k8smaster,k8sslave1,k8sslave2\n         mgr: k8sslave2(active), standbys: k8sslave1\n         mds: cephfs-1/1/1 up  {0=mds-ceph-mds-5bf9fdfc6b-8nq4p=up:active}, 1 up:standby\n         osd: 6 osds: 6 up, 6 in\n      data:\n         pools:   18 pools, 186 pgs\n         objects: 377  objects, 1.2 GiB\n         usage:   4.2 GiB used, 116 GiB / 120 GiB avail\n         pgs:     186 active+clean\n\n- Ceph cluster is in HEALTH_OK state with 3 MONs and 6 OSDs.\n\n.. 
note::\n  Make sure we have single ceph osd chart only\n\n``Helm status:``\n\n.. code-block:: console\n\n  ubuntu@k8smaster:/opt/openstack-helm$  helm list | grep -i osd\n  ceph-osd            1     Tue Mar 26 03:21:07 2019        DEPLOYED        ceph-osd-vdb-0.1.0\n\n- single osd chart deployed successfully.\n\n\nUpgrade to multiple ceph osd charts:\n====================================\n\nstep 1: setup\n=============\n\n- create multiple ceph osd charts as per requirement\n\n.. note::\n  copy ceph-osd folder to multiple ceph osd charts  in openstack-helm folder\n\n.. code-block:: console\n\n  ubuntu@k8smaster:/opt/openstack-helm$  cp -r ceph-osd ceph-osd-vdb\n  ubuntu@k8smaster:/opt/openstack-helm$  cp -r ceph-osd ceph-osd-vdc\n\n.. note::\n  make sure  to correct chart name in each osd chart folder created above, need to\n  update it in  Chart.yaml .\n\n- create script to install multiple  ceph osd charts\n\n.. note::\n  create new installation scripts to reflect new ceph osd charts.\n\n.. code-block:: console\n\n  ubuntu@k8smaster:/opt/openstack-helm$  cp ./tools/deployment/multinode/030-ceph.sh\n  ./tools/deployment/multinode/030-ceph-osd-vdb.sh\n\n  ubuntu@k8smaster:/opt/openstack-helm$  cp ./tools/deployment/multinode/030-ceph.sh\n  ./tools/deployment/multinode/030-ceph-osd-vdc.sh\n\n.. 
note::\n  make sure to delete all other ceph charts from above scripts and have only new ceph osd chart.\n  and also have correct overrides as shown below.\n\n  example1: for CHART in ceph-osd-vdb; do\n  helm upgrade --install ${CHART} ${OSH_PATH}/${CHART} \\\n  --namespace=ceph \\\n  --values=/tmp/ceph.yaml \\\n  ${OSH_EXTRA_HELM_ARGS} \\\n  ${OSH_EXTRA_HELM_ARGS_CEPH_DEPLOY}\n\n  osd:\n    - data:\n        type: block-logical\n        location: /dev/vdb\n      journal:\n        type: block-logical\n        location:  /dev/vda1\n\n  example2: for CHART in ceph-osd-vdc; do\n  helm upgrade --install ${CHART} ${OSH_PATH}/${CHART} \\\n  --namespace=ceph \\\n  --values=/tmp/ceph.yaml \\\n  ${OSH_EXTRA_HELM_ARGS} \\\n  ${OSH_EXTRA_HELM_ARGS_CEPH_DEPLOY}\n\n  osd:\n    - data:\n        type: block-logical\n        location: /dev/vdc\n      journal:\n        type: block-logical\n        location:  /dev/vda2\n\nstep 2: Scale down  applications using ceph pvc\n===============================================\n\n.. note::\n  Scale down all the applications who are using pvcs so that we will not\n  have any writes on  ceph rbds .\n\n.. code-block:: console\n\n  ubuntu@k8smaster:/opt/openstack-helm$  sudo kubectl scale statefulsets -n openstack\n  mariadb-server --replicas=0\n\n  ubuntu@k8smaster:/opt/openstack-helm$  sudo kubectl scale statefulsets -n openstack\n  rabbitmq-rabbitmq --replicas=0\n\n- just gave one example but we need to do it for all the applications using pvcs\n\n\nstep 3: Setup ceph cluster flags to prevent rebalance\n=====================================================\n\n.. note::\n  setup few flags on ceph cluster to prevent rebalance during this process.\n\n.. 
code-block:: console\n\n  ubuntu@k8smaster:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-5qn68 -- ceph osd set\n  noout\n\n  ubuntu@k8smaster:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-5qn68 -- ceph osd set\n  nobackfill\n\n  ubuntu@k8smaster:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-5qn68 -- ceph osd set\n  norecover\n\n  ubuntu@k8smaster:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-5qn68 -- ceph osd set\n  pause\n\nstep 4: Delete single ceph-osd chart\n====================================\n\n.. note::\n  Delete the single ceph-osd chart.\n\n\n.. code-block:: console\n\n  ubuntu@k8smaster:/opt/openstack-helm$ helm delete --purge ceph-osd\n\n\nstep 5: install new ceph-osd charts\n===================================\n\n.. note::\n  Now we can install multiple ceph osd releases.\n\n\n.. code-block:: console\n\n  ubuntu@k8smaster:/opt/openstack-helm$ ./tools/deployment/multinode/030-ceph-osd-vdb.sh\n  ubuntu@k8smaster:/opt/openstack-helm$ ./tools/deployment/multinode/030-ceph-osd-vdc.sh\n  ubuntu@k8smaster:/opt/openstack-helm# helm list | grep -i osd\n  ceph-osd-vdb            1            Tue Mar 26 03:21:07 2019        DEPLOYED  ceph-osd-vdb-0.1.0\n  ceph-osd-vdc            1            Tue Mar 26 03:22:13 2019        DEPLOYED  ceph-osd-vdc-0.1.0\n\n- wait and check for healthy ceph cluster , if there are any issues need to sort out until we see\n  healthy ceph cluster.\n\nstep 6: Unset ceph cluster flags\n================================\n\n.. note::\n  unset the flags we set on the ceph cluster in above steps.\n\n\n.. 
code-block:: console\n\n  ubuntu@k8smaster:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-5qn68 -- ceph osd unset\n  noout\n\n  ubuntu@k8smaster:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-5qn68 -- ceph osd unset\n  nobackfill\n\n  ubuntu@k8smaster:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-5qn68 -- ceph osd unset\n  norecover\n\n  ubuntu@k8smaster:/opt/openstack-helm$ kubectl exec -n ceph ceph-mon-5qn68 -- ceph osd unset\n  pause\n\nstep 7: Scale up the applications using pvc\n===========================================\n\n.. note::\n  Since ceph cluster is back to healthy status, now scale up the applications.\n\n\n.. code-block:: console\n\n  ubuntu@k8smaster:/opt/openstack-helm$  sudo kubectl scale statefulsets -n openstack\n  mariadb-server --replicas=3\n\n  ubuntu@k8smaster:/opt/openstack-helm$  sudo kubectl scale statefulsets -n openstack\n  rabbitmq-rabbitmq --replicas=3\n"
  },
  {
    "path": "elastic-apm-server/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v6.2.3\ndescription: OpenStack-Helm Elastic APM Server\nname: elastic-apm-server\nversion: 2025.2.0\nhome: https://www.elastic.co/guide/en/apm/get-started/current/index.html\nsources:\n  - https://github.com/elastic/apm-server\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit/\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "elastic-apm-server/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: elastic-apm-server-bin\ndata:\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "elastic-apm-server/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: elastic-apm-server-etc\ndata:\n  apm-server.yml: |\n{{ toYaml .Values.conf.apm_server | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "elastic-apm-server/templates/deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment }}\n{{- $envAll := . }}\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n\n{{- $mounts_elastic_apm_server := .Values.pod.mounts.elastic_apm_server.elastic_apm_server }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"elastic-apm-server\" }}\n{{ tuple $envAll \"elastic-apm-server\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n- apiGroups: [\"\"]\n  resources:\n  - namespaces\n  - pods\n  verbs:\n  - get\n  - list\n  - watch\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: elastic-apm-server\n  labels:\n{{ tuple $envAll \"elastic-apm-server\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"elastic-apm-server\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"elastic-apm-server\" | 
include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"elastic-apm-server\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"elastic-apm-server\" \"containerNames\" (list \"elastic-apm-server\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      replicas: {{ .Values.pod.replicas.elastic_apm_server }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.elastic_apm_server.node_selector_key }}: {{ .Values.labels.elastic_apm_server.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"elastic_apm_server\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: elastic-apm-server\n          image: {{ .Values.images.tags.elastic_apm_server }}\n          imagePullPolicy: {{ .Values.images.pull_policy }}\n          securityContext:\n            runAsUser: 0\n{{ tuple $envAll $envAll.Values.pod.resources.elastic_apm_server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          args:\n            - \"-c\"\n            - \"/usr/share/apm-server/apm-server.yml\"\n            - \"-e\"\n          ports:\n            - name: server\n              containerPort: {{ tuple \"elastic_apm_server\" \"internal\" \"server\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          env:\n            - name: ELASTICSEARCH_HOST\n              value: {{ tuple \"elasticsearch\" \"internal\" . 
| include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" | quote }}\n            - name: ELASTICSEARCH_PORT\n              value: {{ tuple \"elasticsearch\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: APM_SERVER_HOST\n              value: {{ tuple \"elastic_apm_server\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" | quote }}\n            - name: APM_SERVER_PORT\n              value: {{ tuple \"elastic_apm_server\" \"internal\" \"server\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: ELASTICSEARCH_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_USERNAME\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_PASSWORD\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: elastic-apm-server-etc\n              mountPath: /usr/share/apm-server/apm-server.yml\n              readOnly: true\n              subPath: apm-server.yml\n            - name: data\n              mountPath: /usr/share/apm-server/data\n{{ if $mounts_elastic_apm_server.volumeMounts }}{{ toYaml $mounts_elastic_apm_server.volumeMounts | indent 8 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: elastic-apm-server-etc\n          configMap:\n            name: elastic-apm-server-etc\n            defaultMode: 0444\n        - name: data\n          hostPath:\n            path: /var/lib/elastic-apm-server\n            type: DirectoryOrCreate\n{{ if $mounts_elastic_apm_server.volumes }}{{ toYaml $mounts_elastic_apm_server.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "elastic-apm-server/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "elastic-apm-server/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"filebeat\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "elastic-apm-server/templates/secret-elasticsearch-creds.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_elasticsearch }}\n{{- $envAll := . }}\n{{- $secretName := index $envAll.Values.secrets.elasticsearch.user }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  ELASTICSEARCH_USERNAME: {{ .Values.endpoints.elasticsearch.auth.admin.username | b64enc }}\n  ELASTICSEARCH_PASSWORD: {{ .Values.endpoints.elasticsearch.auth.admin.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "elastic-apm-server/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "elastic-apm-server/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"elastic_apm_server\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: server\n    port: {{ tuple \"elastic_apm_server\" \"internal\" \"server\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.elastic_apm_server.node_port.enabled }}\n    nodePort: {{ .Values.network.elastic_apm_server.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"elastic-apm-server\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.elastic_apm_server.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n"
  },
  {
    "path": "elastic-apm-server/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for elastic-apm-server\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nrelease_group: null\n\nlabels:\n  elastic_apm_server:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    elastic_apm_server: docker.elastic.co/apm/apm-server:6.2.3\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nsecrets:\n  elasticsearch:\n    user: elastic-apm-server-elasticsearch-user\n  oci_image_registry:\n    elastic-apm-server: elastic-apm-server-oci-image-registry\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - elastic-apm-server-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    elastic_apm_server:\n      services: null\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nconf:\n  apm_server:\n    setup:\n      dashboards:\n        enabled: true\n    host: ['${APM_SERVER_HOST}:${APM_SERVER_PORT}']\n    output:\n      elasticsearch:\n 
       hosts: [\"${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}\"]\n        username: \"${ELASTICSEARCH_USERNAME}\"\n        password: \"${ELASTICSEARCH_PASSWORD}\"\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      elastic-apm-server:\n        username: elastic-apm-server\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  elasticsearch:\n    namespace: null\n    name: elasticsearch\n    auth:\n      admin:\n        username: admin\n        password: changeme\n    hosts:\n      data: elasticsearch-data\n      default: elasticsearch-logging\n      discovery: elasticsearch-discovery\n      public: elasticsearch\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      http:\n        default: 80\n  elastic_apm_server:\n    namespace: null\n    name: apm-server\n    hosts:\n      default: apm-server\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      server:\n        default: 8200\n\npod:\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n  lifecycle:\n    upgrades:\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        elastic_apm_server:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n  replicas:\n    elastic_apm_server: 1\n  resources:\n    
elastic_apm_server:\n      enabled: false\n      limits:\n        memory: '400Mi'\n        cpu: '400m'\n      requests:\n        memory: '100Mi'\n        cpu: '100m'\n  mounts:\n    elastic_apm_server:\n      elastic_apm_server:\n\nnetwork:\n  elastic_apm_server:\n    node_port:\n      enabled: false\n      port: 30200\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  deployment: true\n  service: true\n  job_image_repo_sync: true\n  secret_elasticsearch: true\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "elastic-filebeat/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v7.1.0\ndescription: OpenStack-Helm Elastic Filebeat\nname: elastic-filebeat\nversion: 2025.2.0\nhome: https://www.elastic.co/products/beats/filebeat\nsources:\n  - https://github.com/elastic/beats/tree/master/filebeat\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit/\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "elastic-filebeat/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: filebeat-bin\ndata:\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "elastic-filebeat/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: filebeat-etc\ndata:\n  filebeat.yml: |\n{{ toYaml .Values.conf.filebeat | indent 4 }}\n  system.yml: |\n{{ toYaml .Values.conf.modules.system | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "elastic-filebeat/templates/daemonset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.daemonset }}\n{{- $envAll := . }}\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n\n{{- $mounts_filebeat := .Values.pod.mounts.filebeat.filebeat }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"filebeat\" }}\n{{ tuple $envAll \"filebeat\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - namespaces\n      - nodes\n      - pods\n      - services\n      - endpoints\n      - replicationcontrollers\n      - limitranges\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - apps\n    resources:\n      - statefulsets\n      - daemonsets\n      - deployments\n      - replicasets\n    verbs:\n      - get\n      - list\n      - watch\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: filebeat\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"filebeat\" \"daemon\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"filebeat\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"filebeat\" \"daemon\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"filebeat\" \"containerNames\" (list \"filebeat\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.filebeat.enabled }}\n{{ tuple $envAll \"filebeat\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ else }}\n      nodeSelector:\n        {{ .Values.labels.filebeat.node_selector_key }}: {{ .Values.labels.filebeat.node_selector_value | quote }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"filebeat\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: filebeat\n          image: {{ .Values.images.tags.filebeat }}\n          imagePullPolicy: {{ .Values.images.pull_policy }}\n          securityContext:\n            runAsUser: 0\n{{ tuple $envAll $envAll.Values.pod.resources.filebeat | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          args:\n            - \"-e\"\n          ports:\n            - name: filebeat\n              containerPort: {{ tuple \"filebeat\" \"internal\" \"service\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          env:\n            - name: NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            - name: ELASTICSEARCH_HOST\n              value: {{ tuple \"elasticsearch\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" | quote }}\n            - name: ELASTICSEARCH_PORT\n              value: {{ tuple \"elasticsearch\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: KIBANA_HOST\n              value: {{ tuple \"kibana\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" | quote }}\n            - name: KIBANA_PORT\n              value: {{ tuple \"kibana\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: ELASTICSEARCH_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_USERNAME\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_PASSWORD\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: data\n              mountPath: /usr/share/filebeat/data\n            - name: varlog\n              mountPath: /var/log\n            - name: varlibdockercontainers\n              mountPath: /var/lib/docker/containers\n              readOnly: true\n            - name: filebeat-etc\n              mountPath: /usr/share/filebeat/filebeat.yml\n              readOnly: true\n              subPath: filebeat.yml\n            - name: filebeat-etc\n              mountPath: /usr/share/filebeat/modules.d/system.yml\n              subPath: system.yml\n              readOnly: true\n{{ if 
$mounts_filebeat.volumeMounts }}{{ toYaml $mounts_filebeat.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: varlog\n          hostPath:\n            path: /var/log\n        - name: varlibdockercontainers\n          hostPath:\n            path: /var/lib/docker/containers\n        - name: filebeat-etc\n          configMap:\n            name: filebeat-etc\n            defaultMode: 0444\n        - name: data\n          hostPath:\n            path: /var/lib/filebeat\n            type: DirectoryOrCreate\n{{ if $mounts_filebeat.volumes }}{{ toYaml $mounts_filebeat.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "elastic-filebeat/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "elastic-filebeat/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"filebeat\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "elastic-filebeat/templates/secret-elasticsearch-creds.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_elasticsearch }}\n{{- $envAll := . }}\n{{- $secretName := index $envAll.Values.secrets.elasticsearch.user }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  ELASTICSEARCH_USERNAME: {{ .Values.endpoints.elasticsearch.auth.admin.username | b64enc }}\n  ELASTICSEARCH_PASSWORD: {{ .Values.endpoints.elasticsearch.auth.admin.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "elastic-filebeat/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "elastic-filebeat/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for filebeat\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nrelease_group: null\n\nlabels:\n  filebeat:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    filebeat: docker.elastic.co/beats/filebeat-oss:7.1.0\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nsecrets:\n  elasticsearch:\n    user: filebeat-elasticsearch-user\n  oci_image_registry:\n    elastic-filebeat: elastic-filebeat-oci-image-registry-key\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - filebeat-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    filebeat:\n      services:\n        - endpoint: internal\n          service: kibana\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nconf:\n  filebeat:\n    setup:\n      dashboards:\n        enabled: true\n        index: \"filebeat-*\"\n        retry:\n          enabled: true\n          interval: 
5\n      kibana:\n        host: \"${KIBANA_HOST}:${KIBANA_PORT}\"\n        username: \"${ELASTICSEARCH_USERNAME}\"\n        password: \"${ELASTICSEARCH_PASSWORD}\"\n    path:\n      logs: /var/log/\n    output:\n      elasticsearch:\n        hosts: [\"${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}\"]\n        username: \"${ELASTICSEARCH_USERNAME}\"\n        password: \"${ELASTICSEARCH_PASSWORD}\"\n    filebeat:\n      config:\n        modules:\n          path: ${path.config}/modules.d/*.yml\n          reload:\n            enabled: true\n      autodiscover:\n        providers:\n          - type: kubernetes\n            templates:\n              - condition:\n                  equals:\n                    kubernetes.namespace: kube-system\n                config:\n                  - type: docker\n                    containers.ids:\n                      - \"${data.kubernetes.container.id}\"\n                    exclude_lines: [\"^\\\\s+[\\\\-`('.|_]\"]\n          - type: kubernetes\n            templates:\n              - condition:\n                  equals:\n                    kubernetes.namespace: ceph\n                config:\n                  - type: docker\n                    containers.ids:\n                      - \"${data.kubernetes.container.id}\"\n                    exclude_lines: [\"^\\\\s+[\\\\-`('.|_]\"]\n          - type: kubernetes\n            templates:\n              - condition:\n                  equals:\n                    kubernetes.namespace: openstack\n                config:\n                  - type: docker\n                    containers.ids:\n                      - \"${data.kubernetes.container.id}\"\n                    exclude_lines: [\"^\\\\s+[\\\\-`('.|_]\"]\n          - type: kubernetes\n            templates:\n              - condition:\n                  equals:\n                    kubernetes.namespace: osh-infra\n                config:\n                  - type: docker\n                    containers.ids:\n                 
     - \"${data.kubernetes.container.id}\"\n                    exclude_lines: [\"^\\\\s+[\\\\-`('.|_]\"]\n      processors:\n        - add_kubernetes_metadata:\n            in_cluster: true\n        - drop_event:\n            when:\n              equals:\n                kubernetes:\n                  container:\n                    name: \"filebeat\"\n  modules:\n    system:\n      - module: system\n        syslog:\n          enabled: true\n          var.paths: [\"/var/log/syslog*\"]\n          fields:\n            host:\n              name: \"${NODE_NAME}\"\n        auth:\n          enabled: true\n          var.paths: [\"/var/log/auth.log\"]\n          fields:\n            host:\n              name: \"${NODE_NAME}\"\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      elastic-filebeat:\n        username: elastic-filebeat\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  elasticsearch:\n    namespace: null\n    name: elasticsearch\n    auth:\n      admin:\n        username: admin\n        password: changeme\n    hosts:\n      data: elasticsearch-data\n      default: elasticsearch-logging\n      discovery: elasticsearch-discovery\n      public: elasticsearch\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      http:\n        default: 80\n  kibana:\n    name: kibana\n    namespace: null\n    hosts:\n      default: kibana-dash\n      public: kibana\n    host_fqdn_override:\n      
default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      kibana:\n        default: 5601\n      http:\n        default: 80\n  filebeat:\n    namespace: null\n    name: filebeat\n    hosts:\n      default: filebeat\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      service:\n        default: 5066\n\npod:\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  lifecycle:\n    upgrades:\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        filebeat:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n  resources:\n    filebeat:\n      enabled: false\n      limits:\n        memory: '400Mi'\n        cpu: '400m'\n      requests:\n        memory: '100Mi'\n        cpu: '100m'\n  tolerations:\n    filebeat:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n      - key: node-role.kubernetes.io/node\n        operator: Exists\n  mounts:\n    filebeat:\n      filebeat:\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  daemonset: true\n  job_image_repo_sync: true\n  secret_elasticsearch: true\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 
objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "elastic-metricbeat/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v7.1.0\ndescription: OpenStack-Helm Elastic Metricbeat\nname: elastic-metricbeat\nversion: 2025.2.0\nhome: https://www.elastic.co/products/beats/metricbeat\nsources:\n  - https://github.com/elastic/beats/tree/master/metricbeat\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit/\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "elastic-metricbeat/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: metricbeat-etc\ndata:\n  metricbeat.yml: |\n{{ toYaml .Values.conf.metricbeat | indent 4 }}\n  rabbitmq.yml: |\n{{ toYaml .Values.conf.modules.rabbitmq | indent 4 }}\n  mysql.yml: |\n{{ toYaml .Values.conf.modules.mysql | indent 4 }}\n  system.yml: |\n{{ toYaml .Values.conf.modules.system | indent 4 }}\n  daemonset_kubernetes.yml: |\n{{ toYaml .Values.conf.modules.daemonset_kubernetes | indent 4 }}\n  deployment_kubernetes.yml: |\n{{ toYaml .Values.conf.modules.deployment_kubernetes | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "elastic-metricbeat/templates/daemonset-node-metrics.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.daemonset }}\n{{- $envAll := . }}\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n\n{{- $mounts_metricbeat := .Values.pod.mounts.metricbeat.metricbeat }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"metricbeat\" }}\n{{ tuple $envAll \"metricbeat\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - namespaces\n      - nodes\n      - pods\n      - services\n      - endpoints\n      - replicationcontrollers\n      - limitranges\n      - events\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - apps\n    resources:\n      - statefulsets\n      - daemonsets\n      - deployments\n      - replicasets\n    verbs:\n      - get\n      - list\n      - watch\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: metricbeat-node-modules\nspec:\n  selector:\n    matchLabels:\n{{ tuple 
$envAll \"metricbeat\" \"daemon\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"metricbeat\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"metricbeat\" \"daemon\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n      hostNetwork: true\n      dnsPolicy: {{ .Values.pod.dns_policy }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.metricbeat.enabled }}\n{{ tuple $envAll \"metricbeat\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ else }}\n      nodeSelector:\n        {{ .Values.labels.metricbeat.node_selector_key }}: {{ .Values.labels.metricbeat.node_selector_value | quote }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"metricbeat\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: metricbeat\n          securityContext:\n            privileged: true\n            runAsUser: 0\n          image: {{ .Values.images.tags.metricbeat }}\n          imagePullPolicy: {{ .Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.metricbeat | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          args:\n            - \"-c\"\n            - \"/usr/share/metricbeat/metricbeat.yml\"\n            - \"-e\"\n            - \"-system.hostfs=/hostfs\"\n          env:\n            - name: ELASTICSEARCH_HOST\n              value: {{ tuple \"elasticsearch\" \"internal\" . 
| include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" | quote }}\n            - name: ELASTICSEARCH_PORT\n              value: {{ tuple \"elasticsearch\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: KIBANA_HOST\n              value: {{ tuple \"kibana\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" | quote }}\n            - name: KIBANA_PORT\n              value: {{ tuple \"kibana\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: ELASTICSEARCH_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_USERNAME\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_PASSWORD\n            - name: POD_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: metricbeat-etc\n              mountPath: /usr/share/metricbeat/metricbeat.yml\n              subPath: metricbeat.yml\n              readOnly: true\n            - name: metricbeat-etc\n              mountPath: /usr/share/metricbeat/modules.d/system.yml\n              subPath: system.yml\n              readOnly: true\n            - name: metricbeat-etc\n              mountPath: /usr/share/metricbeat/modules.d/kubernetes.yml\n              subPath: daemonset_kubernetes.yml\n              readOnly: true\n            - name: dockersock\n              mountPath: /var/run/docker.sock\n            - name: proc\n              mountPath: /hostfs/proc\n              readOnly: true\n            - name: cgroup\n              mountPath: /hostfs/sys/fs/cgroup\n       
       readOnly: true\n{{ if $mounts_metricbeat.volumeMounts }}{{ toYaml $mounts_metricbeat.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: proc\n          hostPath:\n            path: /proc\n        - name: cgroup\n          hostPath:\n            path: /sys/fs/cgroup\n        - name: dockersock\n          hostPath:\n            path: /var/run/docker.sock\n        - name: metricbeat-etc\n          configMap:\n            defaultMode: 0444\n            name: metricbeat-etc\n        - name: data\n          emptyDir: {}\n{{ if $mounts_metricbeat.volumes }}{{ toYaml $mounts_metricbeat.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "elastic-metricbeat/templates/deployment-modules.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment }}\n{{- $envAll := . }}\n\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"metricbeat-deployments\" }}\n{{ tuple $envAll \"metricbeat\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - namespaces\n      - nodes\n      - pods\n      - services\n      - endpoints\n      - replicationcontrollers\n      - limitranges\n      - events\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - apps\n    resources:\n      - statefulsets\n      - daemonsets\n      - deployments\n      - replicasets\n    verbs:\n      - get\n      - list\n      - watch\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: metricbeat-deployment-modules\n  labels:\n{{ tuple $envAll \"metricbeat\" \"deployment-modules\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.metricbeat }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"metricbeat\" \"deployment-modules\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"metricbeat\" \"deployment-modules\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"metricbeat\" \"deployment-modules\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.metricbeat.node_selector_key }}: {{ .Values.labels.metricbeat.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"metricbeat\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: metricbeat\n          securityContext:\n            runAsUser: 0\n{{ tuple $envAll \"metricbeat\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.metricbeat | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          args:\n            - \"-c\"\n            - \"/usr/share/metricbeat/metricbeat.yml\"\n            - \"-e\"\n          env:\n            - name: ELASTICSEARCH_HOST\n              value: {{ tuple \"elasticsearch\" \"internal\" . 
| include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" | quote }}\n            - name: ELASTICSEARCH_PORT\n              value: {{ tuple \"elasticsearch\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: KUBE_STATE_METRICS_HOST\n              value: {{ tuple \"kube_state_metrics\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" | quote }}\n            - name: KUBE_STATE_METRICS_PORT\n              value: {{ tuple \"kube_state_metrics\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: KIBANA_HOST\n              value: {{ tuple \"kibana\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" | quote }}\n            - name: KIBANA_PORT\n              value: {{ tuple \"kibana\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: ELASTICSEARCH_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_USERNAME\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_PASSWORD\n            - name: POD_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: metricbeat-etc\n              mountPath: /usr/share/metricbeat/metricbeat.yml\n              subPath: metricbeat.yml\n              readOnly: true\n            - name: metricbeat-etc\n              mountPath: /usr/share/metricbeat/modules.d/kubernetes.yml\n              subPath: deployment_kubernetes.yml\n              readOnly: true\n            - name: 
metricbeat-etc\n              mountPath: /usr/share/metricbeat/modules.d/mysql.yml\n              subPath: mysql.yml\n              readOnly: true\n            - name: metricbeat-etc\n              mountPath: /usr/share/metricbeat/modules.d/rabbitmq.yml\n              subPath: rabbitmq.yml\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: metricbeat-etc\n          configMap:\n            name: metricbeat-etc\n            defaultMode: 0444\n{{- end }}\n"
  },
  {
    "path": "elastic-metricbeat/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "elastic-metricbeat/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"metricbeat\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "elastic-metricbeat/templates/secret-elasticsearch-creds.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_elasticsearch }}\n{{- $envAll := . }}\n{{- $secretName := index $envAll.Values.secrets.elasticsearch.user }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  ELASTICSEARCH_USERNAME: {{ .Values.endpoints.elasticsearch.auth.admin.username | b64enc }}\n  ELASTICSEARCH_PASSWORD: {{ .Values.endpoints.elasticsearch.auth.admin.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "elastic-metricbeat/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "elastic-metricbeat/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for metricbeat\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nrelease_group: null\n\nlabels:\n  metricbeat:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    metricbeat: docker.elastic.co/beats/metricbeat-oss:7.1.0\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nsecrets:\n  elasticsearch:\n    user: metricbeat-elasticsearch-user\n  oci_image_registry:\n    elastic-metricbeat: elastic-metricbeat-oci-image-registry-key\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - metricbeat-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    metricbeat:\n      services:\n        - endpoint: internal\n          service: kibana\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nconf:\n  metricbeat:\n    setup:\n      dashboards:\n        enabled: true\n        index: metricbeat-*\n        retry:\n          enabled: true\n     
     interval: 5\n      kibana:\n        host: \"${KIBANA_HOST}:${KIBANA_PORT}\"\n        username: \"${ELASTICSEARCH_USERNAME}\"\n        password: \"${ELASTICSEARCH_PASSWORD}\"\n    metricbeat:\n      config:\n        modules:\n          path: ${path.config}/modules.d/*.yml\n          reload:\n            enabled: true\n    output:\n      elasticsearch:\n        hosts: ['${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}']\n        username: ${ELASTICSEARCH_USERNAME}\n        password: ${ELASTICSEARCH_PASSWORD}\n  modules:\n    docker:\n      - module: docker\n        metricsets:\n          - \"container\"\n          - \"cpu\"\n          - \"diskio\"\n          - \"healthcheck\"\n          - \"info\"\n          - \"image\"\n          - \"memory\"\n          - \"network\"\n        hosts: [\"unix:///var/run/docker.sock\"]\n        period: 10s\n        enabled: true\n    system:\n      - module: system\n        period: 10s\n        metricsets:\n          - cpu\n          - load\n          - memory\n          - network\n          - process\n          - process_summary\n          - core\n          - diskio\n          - socket\n          - filesystem\n          - fsstat\n        processes: ['.*']\n        cpu.metrics: [\"percentages\"]\n        core.metrics: [\"percentages\"]\n        process.include_top_n:\n          by_cpu: 5\n          by_memory: 5\n        enabled: true\n    daemonset_kubernetes:\n      - module: kubernetes\n        metricsets:\n          - node\n          - system\n          - pod\n          - container\n          - volume\n        period: 10s\n        hosts: [\"localhost:10255\"]\n        add_metadata: true\n        in_cluster: true\n        enabled: true\n    deployment_kubernetes:\n      - module: kubernetes\n        metricsets:\n          - state_node\n          - state_deployment\n          - state_replicaset\n          - state_pod\n          - state_container\n          - event\n        period: 10s\n        hosts: 
['${KUBE_STATE_METRICS_HOST}:${KUBE_STATE_METRICS_PORT}']\n        add_metadata: true\n        in_cluster: true\n        enabled: true\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      elastic-metricbeat:\n        username: elastic-metricbeat\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  kube_state_metrics:\n    namespace: null\n    hosts:\n      default: kube-state-metrics\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      metrics:\n        default: 8080\n  elasticsearch:\n    namespace: null\n    name: elasticsearch\n    auth:\n      admin:\n        username: admin\n        password: changeme\n    hosts:\n      data: elasticsearch-data\n      default: elasticsearch-logging\n      discovery: elasticsearch-discovery\n      public: elasticsearch\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      http:\n        default: 80\n  kibana:\n    name: kibana\n    namespace: osh-infra\n    hosts:\n      default: kibana-dash\n      public: kibana\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      kibana:\n        default: 5601\n      http:\n        default: 80\n\npod:\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: 
kubernetes.io/hostname\n      weight:\n        default: 10\n  lifecycle:\n    upgrades:\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        metricbeat:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n  dns_policy: \"ClusterFirstWithHostNet\"\n  replicas:\n    metricbeat: 1\n  resources:\n    metricbeat:\n      enabled: false\n      limits:\n        memory: '400Mi'\n        cpu: '400m'\n      requests:\n        memory: '100Mi'\n        cpu: '100m'\n  tolerations:\n    metricbeat:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n      - key: node-role.kubernetes.io/node\n        operator: Exists\n  mounts:\n    metricbeat:\n      metricbeat:\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  daemonset: true\n  deployment: true\n  job_image_repo_sync: true\n  secret_elasticsearch: true\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "elastic-packetbeat/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v7.1.0\ndescription: OpenStack-Helm Elastic Packetbeat\nname: elastic-packetbeat\nversion: 2025.2.0\nhome: https://www.elastic.co/products/beats/packetbeat\nsources:\n  - https://github.com/elastic/beats/tree/master/packetbeat\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit/\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "elastic-packetbeat/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: packetbeat-etc\ndata:\n  packetbeat.yml: |\n{{ toYaml .Values.conf.packetbeat | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "elastic-packetbeat/templates/daemonset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.daemonset }}\n{{- $envAll := . }}\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n\n{{- $mounts_packetbeat := .Values.pod.mounts.packetbeat.packetbeat }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"packetbeat\" }}\n{{ tuple $envAll \"packetbeat\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - namespaces\n      - nodes\n      - pods\n      - services\n      - endpoints\n      - replicationcontrollers\n      - limitranges\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - apps\n    resources:\n      - statefulsets\n      - daemonsets\n      - deployments\n      - replicasets\n    verbs:\n      - get\n      - list\n      - watch\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: packetbeat\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"packetbeat\" 
\"daemon\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"packetbeat\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"packetbeat\" \"daemon\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n      securityContext:\n        runAsUser: 0\n      hostNetwork: true\n      dnsPolicy: {{ .Values.pod.dns_policy }}\n      serviceAccountName: {{ $serviceAccountName }}\n      initContainers:\n{{ tuple $envAll \"packetbeat\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: packetbeat\n          image: {{ .Values.images.tags.packetbeat }}\n          imagePullPolicy: {{ .Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.packetbeat | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            privileged: true\n            capabilities:\n              add:\n                - NET_ADMIN\n          args:\n            - \"-c\"\n            - \"/usr/share/packetbeat/packetbeat.yml\"\n            - \"-e\"\n          env:\n            - name: ELASTICSEARCH_HOST\n              value: {{ tuple \"elasticsearch\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" | quote }}\n            - name: ELASTICSEARCH_PORT\n              value: {{ tuple \"elasticsearch\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: KIBANA_HOST\n              value: {{ tuple \"kibana\" \"internal\" . 
| include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" | quote }}\n            - name: KIBANA_PORT\n              value: {{ tuple \"kibana\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: ELASTICSEARCH_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_USERNAME\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_PASSWORD\n            - name: NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: packetbeat-etc\n              mountPath: /usr/share/packetbeat/packetbeat.yml\n              subPath: packetbeat.yml\n              readOnly: true\n{{ if $mounts_packetbeat.volumeMounts }}{{ toYaml $mounts_packetbeat.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: packetbeat-etc\n          configMap:\n            defaultMode: 0444\n            name: packetbeat-etc\n{{ if $mounts_packetbeat.volumes }}{{ toYaml $mounts_packetbeat.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "elastic-packetbeat/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "elastic-packetbeat/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"metricbeat\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "elastic-packetbeat/templates/secret-elasticsearch-creds.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_elasticsearch }}\n{{- $envAll := . }}\n{{- $secretName := index $envAll.Values.secrets.elasticsearch.user }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  ELASTICSEARCH_USERNAME: {{ .Values.endpoints.elasticsearch.auth.admin.username | b64enc }}\n  ELASTICSEARCH_PASSWORD: {{ .Values.endpoints.elasticsearch.auth.admin.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "elastic-packetbeat/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "elastic-packetbeat/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for packetbeat\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nrelease_group: null\n\nlabels:\n  packetbeat:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    packetbeat: docker.elastic.co/beats/packetbeat-oss:7.1.0\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nsecrets:\n  elasticsearch:\n    user: packetbeat-elasticsearch-user\n  oci_image_registry:\n    elastic-packetbeat: elastic-packetbeat-oci-image-registry-key\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - packetbeat-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    packetbeat:\n      services: null\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nconf:\n  packetbeat:\n    setup:\n      kibana:\n        host: \"${KIBANA_HOST}:${KIBANA_PORT}\"\n        username: \"${ELASTICSEARCH_USERNAME}\"\n        password: \"${ELASTICSEARCH_PASSWORD}\"\n      
dashboards:\n        enabled: true\n        index: \"packetbeat-*\"\n        retry:\n          enabled: true\n          interval: 5\n    packetbeat:\n      flows:\n        timeout: 30s\n        period: 10s\n      interfaces:\n        device: any\n      protocols:\n        - type: dhcpv4\n          ports: [67, 68]\n        - type: dns\n          ports: [53]\n          include_authorities: true\n          include_additionals: true\n        - type: http\n          ports: [80, 8080, 8081, 5000, 8002, 6666, 3000, 5601, 9100, 9090, 44134]\n    output:\n      elasticsearch:\n        hosts: ['${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}']\n        username: ${ELASTICSEARCH_USERNAME}\n        password: ${ELASTICSEARCH_PASSWORD}\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      elastic-packetbeat:\n        username: elastic-packetbeat\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  elasticsearch:\n    name: elasticsearch\n    namespace: null\n    auth:\n      admin:\n        username: admin\n        password: changeme\n    hosts:\n      data: elasticsearch-data\n      default: elasticsearch-logging\n      discovery: elasticsearch-discovery\n      public: elasticsearch\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      http:\n        default: 80\n  kibana:\n    name: kibana\n    namespace: null\n    hosts:\n      default: kibana-dash\n      public: kibana\n    host_fqdn_override:\n      
default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      kibana:\n        default: 5601\n      http:\n        default: 80\n\npod:\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n  lifecycle:\n    upgrades:\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        packetbeat:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n  dns_policy: \"ClusterFirstWithHostNet\"\n  replicas:\n    packetbeat: 1\n  resources:\n    packetbeat:\n      enabled: false\n      limits:\n        memory: '400Mi'\n        cpu: '400m'\n      requests:\n        memory: '100Mi'\n        cpu: '100m'\n  mounts:\n    packetbeat:\n      packetbeat:\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  daemonset: true\n  job_image_repo_sync: true\n  secret_elasticsearch: true\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "elasticsearch/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v8.19.9\ndescription: OpenStack-Helm ElasticSearch\nname: elasticsearch\nversion: 2025.2.0\nhome: https://www.elastic.co/\nsources:\n  - https://github.com/elastic/elasticsearch\n  - https://opendev.org/openstack/openstack-helm-addons\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "elasticsearch/templates/bin/_apache.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ev\n\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n\n  if [ -f /etc/apache2/envvars ]; then\n     # Loading Apache2 ENV variables\n     source /etc/httpd/apache2/envvars\n  fi\n  # Apache gets grumpy about PID files pre-existing\n  rm -f /etc/httpd/logs/httpd.pid\n\n  if [ -f /usr/local/apache2/conf/.htpasswd ]; then\n    htpasswd -b /usr/local/apache2/conf/.htpasswd \"$ELASTICSEARCH_USERNAME\" \"$ELASTICSEARCH_PASSWORD\"\n  else\n    htpasswd -cb /usr/local/apache2/conf/.htpasswd \"$ELASTICSEARCH_USERNAME\" \"$ELASTICSEARCH_PASSWORD\"\n  fi\n\n  if [ ! -z $ELASTICSEARCH_LOGGING_USERNAME ]; then\n    htpasswd -b /usr/local/apache2/conf/.htpasswd \"$ELASTICSEARCH_LOGGING_USERNAME\" \"$ELASTICSEARCH_LOGGING_PASSWORD\"\n  fi\n\n  #Launch Apache on Foreground\n  exec httpd -DFOREGROUND\n}\n\nfunction stop () {\n  apachectl -k graceful-stop\n}\n\n$COMMAND\n"
  },
  {
    "path": "elasticsearch/templates/bin/_ceph-admin-keyring.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncat <<EOF > /etc/ceph/ceph.client.admin.keyring\n[client.admin]\n{{- if .Values.conf.ceph.admin_keyring }}\n    key = {{ .Values.conf.ceph.admin_keyring }}\n{{- else }}\n    key = $(cat /tmp/client-keyring)\n{{- end }}\nEOF\n\nexit 0\n"
  },
  {
    "path": "elasticsearch/templates/bin/_create_s3_buckets.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n#!/bin/bash\n\nset -e\n\nfunction check_rgw_s3_bucket () {\n  echo \"Checking if bucket exists\"\n  s3cmd $CONNECTION_ARGS $USER_AUTH_ARGS ls s3://$S3_BUCKET\n}\n\nfunction create_rgw_s3_bucket () {\n  echo \"Creating bucket\"\n  s3cmd $CONNECTION_ARGS $S3_BUCKET_OPTS $USER_AUTH_ARGS mb s3://$S3_BUCKET\n}\n\nfunction modify_bucket_acl () {\n  echo \"Updating bucket ACL\"\n  s3cmd $CONNECTION_ARGS $USER_AUTH_ARGS setacl s3://$S3_BUCKET --acl-grant=read:$S3_USERNAME --acl-grant=write:$S3_USERNAME\n}\n\n{{- $envAll := . 
}}\n{{- range $bucket := .Values.storage.s3.buckets }}\n\nS3_BUCKET={{ $bucket.name }}\nS3_BUCKET_OPTS={{ $bucket.options | default nil | include \"helm-toolkit.utils.joinListWithSpace\" }}\nS3_SSL_OPT={{ $bucket.ssl_connection_option | default \"\" }}\n\nS3_USERNAME=${{ printf \"%s_S3_USERNAME\" ( $bucket.client | replace \"-\" \"_\" | upper) }}\nS3_ACCESS_KEY=${{ printf \"%s_S3_ACCESS_KEY\" ( $bucket.client | replace \"-\" \"_\" | upper) }}\nS3_SECRET_KEY=${{ printf \"%s_S3_SECRET_KEY\" ( $bucket.client | replace \"-\" \"_\" | upper) }}\n\n{{- with $client := index $envAll.Values.storage.s3.clients $bucket.client }}\n\nRGW_HOST={{ $client.settings.endpoint | default (tuple \"ceph_object_store\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\")  }}\nRGW_PROTO={{ $client.settings.protocol | default (tuple \"ceph_object_store\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\")   }}\n\n{{- end }}\n\nCONNECTION_ARGS=\"--host=$RGW_HOST --host-bucket=$RGW_HOST\"\nif [ \"$RGW_PROTO\" = \"http\" ]; then\n  CONNECTION_ARGS+=\" --no-ssl\"\nelse\n  CONNECTION_ARGS+=\" $S3_SSL_OPT\"\nfi\n\nUSER_AUTH_ARGS=\" --access_key=$S3_ACCESS_KEY --secret_key=$S3_SECRET_KEY\"\n\necho \"Creating Bucket $S3_BUCKET at $RGW_HOST\"\ncheck_rgw_s3_bucket || ( create_rgw_s3_bucket && modify_bucket_acl )\n\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/bin/_create_s3_users.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n   http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n#!/bin/bash\n\nset -e\n\nfunction create_s3_user () {\n  echo \"Creating s3 user and key pair\"\n  radosgw-admin user create \\\n    --uid=${S3_USERNAME} \\\n    --display-name=${S3_USERNAME} \\\n    --key-type=s3 \\\n    --access-key ${S3_ACCESS_KEY} \\\n    --secret-key ${S3_SECRET_KEY}\n}\n\nfunction update_s3_user () {\n  # Retrieve old access keys, if they exist\n  old_access_keys=$(radosgw-admin user info --uid=${S3_USERNAME} \\\n    | jq -r '.keys[].access_key' || true)\n  if [[ ! 
-z ${old_access_keys} ]]; then\n    for access_key in $old_access_keys; do\n      # If current access key is the same as the key supplied, do nothing.\n      if [ \"$access_key\" == \"${S3_ACCESS_KEY}\" ]; then\n        echo \"Current user and key pair exists.\"\n        continue\n      else\n        # If keys differ, remove previous key\n        radosgw-admin key rm --uid=${S3_USERNAME} --key-type=s3 --access-key=$access_key\n      fi\n    done\n  fi\n  # Perform one more additional check to account for scenarios where multiple\n  # key pairs existed previously, but one existing key was the supplied key\n  current_access_key=$(radosgw-admin user info --uid=${S3_USERNAME} \\\n    | jq -r '.keys[].access_key' || true)\n  # If the supplied key does not exist, modify the user\n  if [[ -z ${current_access_key} ]]; then\n    # Modify user with new access and secret keys\n    echo \"Updating existing user's key pair\"\n    radosgw-admin user modify \\\n      --uid=${S3_USERNAME}\\\n      --access-key ${S3_ACCESS_KEY} \\\n      --secret-key ${S3_SECRET_KEY}\n  fi\n}\n\n{{- range $client, $config := .Values.storage.s3.clients -}}\n{{- if $config.create_user | default false }}\n\nS3_USERNAME=${{ printf \"%s_S3_USERNAME\" ($client | replace \"-\" \"_\" | upper)  }}\nS3_ACCESS_KEY=${{ printf \"%s_S3_ACCESS_KEY\" ($client | replace \"-\" \"_\" | upper)  }}\nS3_SECRET_KEY=${{ printf \"%s_S3_SECRET_KEY\" ($client | replace \"-\" \"_\" | upper)  }}\n\nuser_exists=$(radosgw-admin user info --uid=${S3_USERNAME} || true)\nif [[ -z ${user_exists} ]]; then\n  echo \"Creating $S3_USERNAME\"\n  create_s3_user > /dev/null 2>&1\nelse\n  echo \"Updating $S3_USERNAME\"\n  update_s3_user > /dev/null 2>&1\nfi\n\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/bin/_create_template.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -e\n\nNUM_ERRORS=0\n\n{{ range $name, $object := .Values.conf.api_objects }}\n{{ if not (empty $object) }}\n\necho \"creating {{$name}}\"\nerror=$(curl ${CACERT_OPTION} -K- <<< \"--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" \\\n   -X{{ $object.method | default \"PUT\" | upper }} \\\n   \"${ELASTICSEARCH_ENDPOINT}/{{ $object.endpoint }}\" \\\n   -H 'Content-Type: application/json' -d '{{ $object.body | toJson }}' | jq -r '.error')\n\nif [ $error == \"null\" ]; then\n   echo \"Object {{$name}} was created.\"\nelse\n   echo \"Error when creating object {{$name}}: $(echo $error | jq -r)\"\n   NUM_ERRORS=$(($NUM_ERRORS+1))\nfi\n\n{{ end }}\n{{ end }}\n\nif [ $NUM_ERRORS -gt 0 ]; then\n   exit 1\nelse\n   echo \"leaving normally\"\nfi\n"
  },
  {
    "path": "elasticsearch/templates/bin/_curator.sh.tpl",
    "content": "#!/bin/sh\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec {{ .Values.conf.curator.executable }} --config /etc/config/config.yml /etc/config/action_file.yml\n"
  },
  {
    "path": "elasticsearch/templates/bin/_elasticsearch.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- $envAll := . }}\n\nset -e\nCOMMAND=\"${@:-start}\"\n\nfunction initiate_keystore () {\n  elasticsearch-keystore create\n  {{- if .Values.conf.elasticsearch.snapshots.enabled }}\n  {{- range $client, $settings := .Values.storage.s3.clients -}}\n  {{- $access_key := printf \"%s_S3_ACCESS_KEY\" ( $client | replace \"-\" \"_\" | upper) }}\n  {{- $secret_key := printf \"%s_S3_SECRET_KEY\" ( $client | replace \"-\" \"_\" | upper) }}\n  echo ${{$access_key}} | elasticsearch-keystore add -xf s3.client.{{ $client }}.access_key\n  echo ${{$secret_key}} | elasticsearch-keystore add -xf s3.client.{{ $client }}.secret_key\n  {{- end }}\n  {{- end }}\n\n  {{- if .Values.manifests.certificates }}\n  {{- $alias := .Values.secrets.tls.elasticsearch.elasticsearch.internal }}\n  JAVA_KEYTOOL_PATH=/usr/share/elasticsearch/jdk/bin/keytool\n  TRUSTSTORE_PATH=/usr/share/elasticsearch/config/elasticsearch-java-truststore\n  ${JAVA_KEYTOOL_PATH} -importcert -alias {{$alias}} -keystore ${TRUSTSTORE_PATH} -trustcacerts -noprompt -file ${JAVA_KEYSTORE_CERT_PATH} -storepass ${ELASTICSEARCH_PASSWORD}\n  ${JAVA_KEYTOOL_PATH} -storepasswd -keystore ${TRUSTSTORE_PATH} -new ${ELASTICSEARCH_PASSWORD} -storepass ${ELASTICSEARCH_PASSWORD}\n  {{- end }}\n}\n\nfunction start () {\n  initiate_keystore\n  exec /usr/local/bin/docker-entrypoint.sh elasticsearch\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\nfunction wait_to_join() 
{\n  # delay 5 seconds before the first check\n  sleep 5\n  joined=$(curl -s ${CACERT_OPTION} -K- <<< \"--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" \"${ELASTICSEARCH_ENDPOINT}/_cat/nodes\" | grep -w $NODE_NAME || true )\n  i=0\n  while [ -z \"$joined\" ]; do\n    sleep 5\n    joined=$(curl -s ${CACERT_OPTION} -K- <<< \"--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" \"${ELASTICSEARCH_ENDPOINT}/_cat/nodes\" | grep -w $NODE_NAME || true )\n    i=$((i+1))\n    # Waiting for up to 60 minutes\n    if [ $i -gt 720 ]; then\n      break\n    fi\n  done\n}\n\nfunction allocate_data_node () {\n  echo \"Node ${NODE_NAME} has started. Waiting to rejoin the cluster.\"\n  wait_to_join\n  echo \"Re-enabling Replica Shard Allocation\"\n  curl -s ${CACERT_OPTION} -K- <<< \"--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" -XPUT -H 'Content-Type: application/json' \\\n    \"${ELASTICSEARCH_ENDPOINT}/_cluster/settings\" -d \"{\n    \\\"persistent\\\": {\n      \\\"cluster.routing.allocation.enable\\\": null\n    }\n  }\"\n}\n\nfunction start_master_node () {\n  initiate_keystore\n  if [ ! 
-f {{ $envAll.Values.conf.elasticsearch.config.path.data }}/cluster-bootstrap.txt ];\n  then\n    {{ if empty $envAll.Values.conf.elasticsearch.config.cluster.initial_master_nodes -}}\n    {{- $_ := set $envAll.Values \"__eligible_masters\" ( list ) }}\n    {{- range $podInt := until ( atoi (print $envAll.Values.pod.replicas.master ) ) }}\n    {{- $eligibleMaster := printf \"elasticsearch-master-%s\" (toString $podInt) }}\n    {{- $__eligible_masters := append $envAll.Values.__eligible_masters $eligibleMaster }}\n    {{- $_ := set $envAll.Values \"__eligible_masters\" $__eligible_masters }}\n    {{- end -}}\n    {{- $masters := include \"helm-toolkit.utils.joinListWithComma\" $envAll.Values.__eligible_masters -}}\n    echo {{$masters}} >> {{ $envAll.Values.conf.elasticsearch.config.path.data }}/cluster-bootstrap.txt\n    exec /usr/local/bin/docker-entrypoint.sh elasticsearch -Ecluster.initial_master_nodes={{$masters}}\n    {{- end }}\n  else\n    exec /usr/local/bin/docker-entrypoint.sh elasticsearch\n  fi\n}\n\nfunction start_data_node () {\n  initiate_keystore\n  allocate_data_node &\n  /usr/local/bin/docker-entrypoint.sh elasticsearch &\n  function drain_data_node () {\n\n    # Implement the Rolling Restart Protocol Described Here:\n    # https://www.elastic.co/guide/en/elasticsearch/reference/7.x/restart-cluster.html#restart-cluster-rolling\n\n    echo \"Disabling Replica Shard Allocation\"\n    curl -s ${CACERT_OPTION} -K- <<< \"--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" -XPUT -H 'Content-Type: application/json' \\\n      \"${ELASTICSEARCH_ENDPOINT}/_cluster/settings\" -d \"{\n      \\\"persistent\\\": {\n        \\\"cluster.routing.allocation.enable\\\": \\\"primaries\\\"\n      }\n    }\"\n\n    # If version < 7.6 use _flush/synced; otherwise use _flush\n    # https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-synced-flush-api.html#indices-synced-flush-api\n\n    version=$(curl -s ${CACERT_OPTION} -K- <<< \"--user 
${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" \"${ELASTICSEARCH_ENDPOINT}/\" | jq -r .version.number)\n\n    if [[ $version =~ \"7.1\" ]]; then\n      action=\"_flush/synced\"\n    else\n      action=\"_flush\"\n    fi\n\n    curl -s ${CACERT_OPTION} -K- <<< \"--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" -XPOST \"${ELASTICSEARCH_ENDPOINT}/$action\"\n\n    # TODO: Check the response of synced flush operations to make sure there are no failures.\n    # Synced flush operations that fail due to pending indexing operations are listed in the response body,\n    # although the request itself still returns a 200 OK status. If there are failures, reissue the request.\n    # (The only side effect of not doing so is slower start up times. See flush documentation linked above)\n\n    echo \"Node ${NODE_NAME} is ready to shutdown\"\n\n    echo \"Killing Elasticsearch background processes\"\n    jobs -p | xargs -t -r kill -TERM\n    wait\n\n    # remove the trap handler\n    trap - TERM EXIT HUP INT\n\n    echo \"Node ${NODE_NAME} shutdown is complete\"\n    exit 0\n  }\n  trap drain_data_node TERM EXIT HUP INT\n  wait\n\n}\n\n$COMMAND\n"
  },
  {
    "path": "elasticsearch/templates/bin/_helm-tests.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nfunction create_test_index () {\n  index_result=$(curl ${CACERT_OPTION} -K- <<< \"--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" \\\n  -XPUT \"${ELASTICSEARCH_ENDPOINT}/test_index?pretty\" -H 'Content-Type: application/json' -d'\n  {\n    \"settings\" : {\n      \"index\" : {\n        \"number_of_shards\" : 3,\n        \"number_of_replicas\" : 2\n      }\n    }\n  }\n  ' | grep -o '\"acknowledged\" *: *true')\n\n  if [ -n \"$index_result\" ]; then\n    echo \"PASS: Test index created!\";\n  else\n    echo \"FAIL: Test index not created!\";\n    exit 1;\n  fi\n}\n\nfunction remove_test_index () {\n  echo \"Deleting index created for service testing\"\n  curl ${CACERT_OPTION} -K- <<< \"--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" \\\n  -XDELETE \"${ELASTICSEARCH_ENDPOINT}/test_index\"\n}\n\nremove_test_index || true\ncreate_test_index\nremove_test_index\n"
  },
  {
    "path": "elasticsearch/templates/bin/_verify-repositories.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{ $envAll := . }}\n\nset -ex\n\nfunction verify_snapshot_repository() {\n  curl ${CACERT_OPTION} -K- <<< \"--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" \\\n    -XPOST \"${ELASTICSEARCH_ENDPOINT}/_snapshot/$1/_verify\"\n}\n\nrepositories=$(curl ${CACERT_OPTION} -K- <<< \"--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" \\\n                \"${ELASTICSEARCH_ENDPOINT}/_snapshot\" | jq -r 'keys | @sh')\n\nrepositories=$(echo $repositories | sed \"s/'//g\") # Strip single quotes from jq output\n\nfor repository in $repositories; do\n  error=$(verify_snapshot_repository $repository | jq -r '.error' )\n  if [ $error == \"null\" ]; then\n    echo \"$repository is verified.\"\n  else\n    echo \"Error for $repository: $(echo $error | jq -r)\"\n    exit 1;\n  fi\ndone\n"
  },
  {
    "path": "elasticsearch/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.certificates -}}\n{{  dict \"envAll\" . \"service\" \"elasticsearch\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "elasticsearch/templates/configmap-bin-curator.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin_curator }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: elastic-curator-bin\ndata:\n  curator.sh: |\n{{ tuple \"bin/_curator.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/configmap-bin-elasticsearch.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin_elasticsearch }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: elasticsearch-bin\ndata:\n  apache.sh: |\n{{ tuple \"bin/_apache.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  elasticsearch.sh: |\n{{ tuple \"bin/_elasticsearch.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  helm-tests.sh: |\n{{ tuple \"bin/_helm-tests.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ceph-admin-keyring.sh: |\n{{ tuple \"bin/_ceph-admin-keyring.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  create-s3-bucket.sh: |\n{{ tuple \"bin/_create_s3_buckets.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  create-s3-user.sh: |\n{{ tuple \"bin/_create_s3_users.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  create_template.sh: |\n{{ tuple \"bin/_create_template.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  verify-repositories.sh: |\n{{ tuple \"bin/_verify-repositories.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/configmap-etc-curator.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc_curator }}\n{{- $envAll := . }}\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: elastic-curator-etc\ntype: Opaque\ndata:\n  action_file.yml: {{ toYaml .Values.conf.curator.action_file | b64enc }}\n  config.yml: {{ toYaml .Values.conf.curator.config | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/configmap-etc-elasticsearch.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc_elasticsearch }}\n{{- $envAll := . }}\n\n{{- if .Values.conf.elasticsearch.snapshots.enabled }}\n{{- range $client, $config := $envAll.Values.storage.s3.clients }}\n{{- $settings := $config.settings }}\n{{- $endpoint := $settings.endpoint | default (tuple \"ceph_object_store\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\") }}\n{{- $_ := set $settings \"endpoint\" $endpoint }}\n{{- $protocol := $settings.protocol | default (tuple \"ceph_object_store\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\") }}\n{{- $_ := set $settings \"protocol\" $protocol }}\n{{- $_:= set $envAll.Values.conf.elasticsearch.config.s3.client $client $settings }}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.elasticsearch.config.discovery.seed_hosts -}}\n{{- $discovery_svc := tuple \"elasticsearch\" \"discovery\" . 
| include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" -}}\n{{- $_:= set .Values.conf.elasticsearch.config.discovery \"seed_hosts\" $discovery_svc -}}\n{{- end -}}\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: elasticsearch-etc\ntype: Opaque\ndata:\n  elasticsearch.yml: {{ toYaml .Values.conf.elasticsearch.config | b64enc }}\n  # NOTE(portdirect): this must be last, to work round helm ~2.7 bug.\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.httpd \"key\" \"httpd.conf\" \"format\" \"Secret\") | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.log4j2 \"key\" \"log4j2.properties\" \"format\" \"Secret\") | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.jvm_options \"key\" \"jvm.options\" \"format\" \"Secret\") | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/cron-job-curator.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_curator }}\n{{- $envAll := . }}\n\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n\n{{- $serviceAccountName := \"elastic-curator\" }}\n{{ tuple $envAll \"curator\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: elastic-curator\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"elasticsearch\" \"curator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  schedule: {{ .Values.jobs.curator.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.curator.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.curator.history.failed }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"elasticsearch\" \"curator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"elasticsearch\" \"curator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n{{ dict \"envAll\" $envAll \"application\" \"curator\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key 
}}: {{ .Values.labels.job.node_selector_value | quote }}\n          serviceAccountName: {{ $serviceAccountName }}\n          restartPolicy: OnFailure\n          initContainers:\n{{ tuple $envAll \"curator\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 12 }}\n          containers:\n            - name: curator\n{{ tuple $envAll \"curator\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.curator | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"curator\" \"container\" \"curator\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              command:\n                - /tmp/curator.sh\n              env:\n                - name: ELASTICSEARCH_USERNAME\n                  valueFrom:\n                    secretKeyRef:\n                      name: {{ $esUserSecret }}\n                      key: ELASTICSEARCH_USERNAME\n                - name: ELASTICSEARCH_PASSWORD\n                  valueFrom:\n                    secretKeyRef:\n                      name: {{ $esUserSecret }}\n                      key: ELASTICSEARCH_PASSWORD\n                - name: ELASTICSEARCH_URL\n                  valueFrom:\n                    secretKeyRef:\n                      name: {{ $esUserSecret }}\n                      key: ELASTICSEARCH_URL\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - name: pod-etc-curator\n                  mountPath: /etc/config\n                - name: elastic-curator-bin\n                  mountPath: /tmp/curator.sh\n                  subPath: curator.sh\n                  readOnly: true\n                - name: elastic-curator-etc\n                  mountPath: /etc/config/config.yml\n                  subPath: config.yml\n                  readOnly: true\n                - name: 
elastic-curator-etc\n                  mountPath: /etc/config/action_file.yml\n                  subPath: action_file.yml\n                  readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal \"path\" \"/etc/elasticsearch/certs\" \"certs\" tuple \"ca.crt\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: pod-etc-curator\n              emptyDir: {}\n            - name: elastic-curator-bin\n              configMap:\n                name: elastic-curator-bin\n                defaultMode: 0555\n            - name: elastic-curator-etc\n              secret:\n                secretName: elastic-curator-etc\n                defaultMode: 0444\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/cron-job-verify-repositories.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and (.Values.manifests.cron_verify_repositories) (.Values.conf.elasticsearch.snapshots.enabled) }}\n{{- $envAll := . }}\n\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n\n{{- $serviceAccountName := \"verify-repositories\" }}\n{{ tuple $envAll \"verify_repositories\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: elasticsearch-verify-repositories\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"elasticsearch\" \"verify-repositories\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  schedule: {{ .Values.jobs.verify_repositories.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.verify_repositories.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.verify_repositories.history.failed }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"elasticsearch\" \"verify-repositories\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"elasticsearch-verify-repositories\" \"containerNames\" (list \"elasticsearch-verify-repositories\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 
}}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"elasticsearch\" \"verify-repositories\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n{{ dict \"envAll\" $envAll \"application\" \"verify_repositories\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value | quote }}\n          serviceAccountName: {{ $serviceAccountName }}\n          restartPolicy: OnFailure\n          initContainers:\n{{ tuple $envAll \"verify_repositories\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 12 }}\n          containers:\n            - name: elasticsearch-verify-repositories\n{{ tuple $envAll \"snapshot_repository\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.snapshot_repository | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"verify_repositories\" \"container\" \"elasticsearch_verify_repositories\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              command:\n                - /tmp/verify-repositories.sh\n              env:\n                - name: ELASTICSEARCH_USERNAME\n                  valueFrom:\n                    secretKeyRef:\n                      name: {{ $esUserSecret }}\n                      key: ELASTICSEARCH_USERNAME\n                - name: ELASTICSEARCH_PASSWORD\n                  valueFrom:\n                    secretKeyRef:\n                      name: {{ $esUserSecret }}\n                      key: ELASTICSEARCH_PASSWORD\n                - name: ELASTICSEARCH_ENDPOINT\n                  value: {{ printf \"%s://%s\" (tuple \"elasticsearch\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\") (tuple \"elasticsearch\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\") }}\n{{- if .Values.manifests.certificates }}\n                - name: CACERT_OPTION\n                  value: \"--cacert /etc/elasticsearch/certs/ca.crt\"\n{{- end }}\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - name: elasticsearch-bin\n                  mountPath: /tmp/verify-repositories.sh\n                  subPath: verify-repositories.sh\n                  readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal \"path\" \"/etc/elasticsearch/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: elasticsearch-bin\n              configMap:\n                name: elasticsearch-bin\n                defaultMode: 0555\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/deployment-client.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"readinessProbeTemplate\" }}\n{{- $probePort := tuple \"elasticsearch\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $probeUser := .Values.endpoints.elasticsearch.auth.admin.username }}\n{{- $probePass := .Values.endpoints.elasticsearch.auth.admin.password }}\n{{- $authHeader := printf \"%s:%s\" $probeUser $probePass | b64enc }}\nhttpGet:\n  path: /_cluster/health\n  scheme: {{ tuple \"elasticsearch\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  port: {{ $probePort }}\n  httpHeaders:\n    - name: Authorization\n      value: Basic {{ $authHeader }}\n{{- end }}\n{{- define \"livenessProbeTemplate\" }}\n{{- $probePort := tuple \"elasticsearch\" \"internal\" \"discovery\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\ntcpSocket:\n  port: {{ $probePort }}\n{{- end }}\n\n{{- if .Values.manifests.deployment_client }}\n{{- $envAll := . 
}}\n\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n{{- $s3UserSecret := .Values.secrets.rgw.elasticsearch }}\n\n{{- $mounts_elasticsearch := .Values.pod.mounts.elasticsearch.elasticsearch }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"elasticsearch-client\" }}\n{{ tuple $envAll \"elasticsearch_client\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: elasticsearch-client\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"elasticsearch\" \"client\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.client }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"elasticsearch\" \"client\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"elasticsearch\" \"client\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin-elasticsearch.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc-elasticsearch.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"elasticsearch-client\" \"containerNames\" (list \"elasticsearch-client\" \"init\" \"memory-map-increase\" \"apache-proxy\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"client\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"elasticsearch\" \"client\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.client.node_selector_key }}: {{ .Values.labels.client.node_selector_value | quote }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.client.timeout | default \"600\" }}\n      initContainers:\n{{ tuple $envAll \"elasticsearch_client\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n        - name: memory-map-increase\n{{ tuple $envAll \"memory_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.client | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"client\" \"container\" \"memory_map_increase\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n          - sysctl\n          - -w\n          - vm.max_map_count={{ .Values.conf.init.max_map_count }}\n      containers:\n        - name: apache-proxy\n{{ tuple $envAll \"apache_proxy\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.apache_proxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"client\" \"container\" \"apache_proxy\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/apache.sh\n            - start\n          ports:\n            - name: {{ tuple \"elasticsearch\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\n              containerPort: {{ tuple \"elasticsearch\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"elasticsearch\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 20\n            periodSeconds: 10\n          env:\n            - name: ELASTICSEARCH_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_USERNAME\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_PASSWORD\n            - name: ELASTICSEARCH_LOGGING_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_LOGGING_USERNAME\n            - name: ELASTICSEARCH_LOGGING_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_LOGGING_PASSWORD\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: elasticsearch-bin\n              mountPath: /tmp/apache.sh\n              subPath: apache.sh\n              readOnly: true\n            - name: elasticsearch-etc\n              mountPath: /usr/local/apache2/conf/httpd.conf\n              subPath: httpd.conf\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" 
.Values.secrets.tls.elasticsearch.elasticsearch.internal \"path\" \"/etc/elasticsearch/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n        - name: elasticsearch-client\n{{ tuple $envAll \"elasticsearch\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.client | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"client\" \"container\" \"elasticsearch_client\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/elasticsearch.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/elasticsearch.sh\n                  - stop\n          ports:\n            - name: transport\n              containerPort: {{ tuple \"elasticsearch\" \"internal\" \"discovery\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" . \"component\" \"elasticsearch\" \"container\" \"elasticsearch-client\" \"type\" \"liveness\" \"probeTemplate\" (include \"livenessProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"elasticsearch\" \"container\" \"elasticsearch-client\" \"type\" \"readiness\" \"probeTemplate\" (include \"readinessProbeTemplate\" . 
| fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: node.roles\n              value: \"[ingest]\"\n            - name: HTTP_ENABLE\n              value: \"true\"\n            - name: DISCOVERY_SERVICE\n              value: {{ tuple \"elasticsearch\" \"discovery\" $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n            - name: ES_JAVA_OPTS\n              value: \"{{ .Values.conf.elasticsearch.env.java_opts.client }}\"\n{{- if .Values.manifests.certificates }}\n            - name: JAVA_KEYSTORE_CERT_PATH\n              value: \"/usr/share/elasticsearch/config/ca.crt\"\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_PASSWORD\n{{- end }}\n{{- if .Values.conf.elasticsearch.snapshots.enabled }}\n{{- if .Values.manifests.object_bucket_claim }}\n{{- include \"helm-toolkit.snippets.rgw_s3_bucket_user_env_vars_rook\" . | indent 12 }}\n{{- else }}\n{{- include \"helm-toolkit.snippets.rgw_s3_user_env_vars\" . 
| indent 12 }}\n{{- end }}\n{{- end }}\n{{- if .Values.pod.env.client }}\n{{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.pod.env.client | indent 12 }}\n{{- end }}\n{{- if .Values.pod.env.secrets }}\n{{ tuple $envAll .Values.pod.env.secrets | include \"helm-toolkit.utils.to_k8s_env_secret_vars\" | indent 12 }}\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: elasticsearch-logs\n              mountPath: {{ .Values.conf.elasticsearch.config.path.logs }}\n            - name: elasticsearch-bin\n              mountPath: /tmp/elasticsearch.sh\n              subPath: elasticsearch.sh\n              readOnly: true\n            - name: elasticsearch-etc\n              mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\n              subPath: elasticsearch.yml\n              readOnly: true\n            - name: elasticsearch-etc\n              mountPath: /usr/share/elasticsearch/config/log4j2.properties\n              subPath: log4j2.properties\n              readOnly: true\n            - name: elasticsearch-etc\n              mountPath: /usr/share/elasticsearch/config/jvm.options\n              subPath: jvm.options\n              readOnly: true\n            - name: storage\n              mountPath: {{ .Values.conf.elasticsearch.config.path.data }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal \"path\" \"/usr/share/elasticsearch/config\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_elasticsearch.volumeMounts }}{{ toYaml $mounts_elasticsearch.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: elasticsearch-logs\n          emptyDir: {}\n        - name: elasticsearch-bin\n          configMap:\n            name: elasticsearch-bin\n            defaultMode: 0555\n        - name: elasticsearch-etc\n          secret:\n   
         secretName: elasticsearch-etc\n            defaultMode: 0444\n        - name: storage\n          emptyDir: {}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_elasticsearch.volumes }}{{ toYaml $mounts_elasticsearch.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/deployment-gateway.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.network.remote_clustering.enabled }}\n{{- $envAll := . }}\n\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n{{- $s3UserSecret := .Values.secrets.rgw.elasticsearch }}\n\n{{- $mounts_elasticsearch := .Values.pod.mounts.elasticsearch.elasticsearch }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"elasticsearch-remote-gateway\" }}\n{{ tuple $envAll \"elasticsearch_gateway\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: elasticsearch-gateway\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"elasticsearch\" \"gateway\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  replicas: {{ .Values.pod.replicas.gateway }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"elasticsearch\" \"gateway\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"elasticsearch\" \"gateway\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple 
$envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin-elasticsearch.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc-elasticsearch.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"elasticsearch-gateway\" \"containerNames\" (list \"elasticsearch-remote-gateway\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"gateway\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"elasticsearch\" \"gateway\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.gateway.node_selector_key }}: {{ .Values.labels.gateway.node_selector_value | quote }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.client.timeout | default \"600\" }}\n      initContainers:\n{{ tuple $envAll \"elasticsearch\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n        - name: memory-map-increase\n{{ tuple $envAll \"memory_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.client | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"gateway\" \"container\" \"memory_map_increase\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n          - sysctl\n          - -w\n          - vm.max_map_count={{ .Values.conf.init.max_map_count }}\n      containers:\n        - name: elasticsearch-gateway\n{{ tuple $envAll \"elasticsearch\" | include \"helm-toolkit.snippets.image\" | indent 10 
}}\n{{ tuple $envAll $envAll.Values.pod.resources.gateway | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"gateway\" \"container\" \"elasticsearch_gateway\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/elasticsearch.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/elasticsearch.sh\n                  - stop\n          ports:\n            - name: transport\n              containerPort: {{ tuple \"elasticsearch\" \"internal\" \"discovery\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          livenessProbe:\n            tcpSocket:\n              port: {{ tuple \"elasticsearch\" \"internal\" \"discovery\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 20\n            periodSeconds: 10\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"elasticsearch\" \"internal\" \"discovery\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 20\n            periodSeconds: 10\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: node.roles\n              value: \"[ingest]\"\n            - name: HTTP_ENABLE\n              value: \"false\"\n            - name: DISCOVERY_SERVICE\n              value: {{ tuple \"elasticsearch\" \"discovery\" $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n            - name: ES_JAVA_OPTS\n              value: \"{{ .Values.conf.elasticsearch.env.java_opts.client }}\"\n{{- if .Values.manifests.certificates }}\n            - name: JAVA_KEYSTORE_CERT_PATH\n              value: \"/usr/share/elasticsearch/config/ca.crt\"\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_PASSWORD\n{{- end }}\n{{- if .Values.conf.elasticsearch.snapshots.enabled }}\n{{- if .Values.manifests.object_bucket_claim }}\n{{- include \"helm-toolkit.snippets.rgw_s3_bucket_user_env_vars_rook\" . | indent 12 }}\n{{- else }}\n{{- include \"helm-toolkit.snippets.rgw_s3_user_env_vars\" . 
| indent 12 }}\n{{- end }}\n{{- end }}\n{{- if .Values.pod.env.gateway }}\n{{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.pod.env.gateway | indent 12 }}\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: elasticsearch-logs\n              mountPath: {{ .Values.conf.elasticsearch.config.path.logs }}\n            - name: elasticsearch-bin\n              mountPath: /tmp/elasticsearch.sh\n              subPath: elasticsearch.sh\n              readOnly: true\n            - name: elasticsearch-etc\n              mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\n              subPath: elasticsearch.yml\n              readOnly: true\n            - name: elasticsearch-etc\n              mountPath: /usr/share/elasticsearch/config/log4j2.properties\n              subPath: log4j2.properties\n              readOnly: true\n            - name: elasticsearch-etc\n              mountPath: /usr/share/elasticsearch/config/jvm.options\n              subPath: jvm.options\n              readOnly: true\n            - name: storage\n              mountPath: {{ .Values.conf.elasticsearch.config.path.data }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal \"path\" \"/usr/share/elasticsearch/config\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_elasticsearch.volumeMounts }}{{ toYaml $mounts_elasticsearch.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: elasticsearch-logs\n          emptyDir: {}\n        - name: elasticsearch-bin\n          configMap:\n            name: elasticsearch-bin\n            defaultMode: 0555\n        - name: elasticsearch-etc\n          secret:\n            secretName: elasticsearch-etc\n            defaultMode: 0444\n        - name: storage\n          emptyDir: {}\n{{- dict \"enabled\" 
.Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_elasticsearch.volumes }}{{ toYaml $mounts_elasticsearch.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "elasticsearch/templates/ingress-elasticsearch.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress .Values.network.elasticsearch.ingress.public }}\n{{- $envAll := . -}}\n{{- $port := tuple \"elasticsearch\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"elasticsearch\" \"backendServiceType\" \"elasticsearch\" \"backendPort\" $port -}}\n{{- $secretName := $envAll.Values.secrets.tls.elasticsearch.elasticsearch.internal -}}\n{{- if and .Values.manifests.certificates $secretName -}}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.elasticsearch.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/job-elasticsearch-template.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_elasticsearch_templates }}\n{{- $envAll := . }}\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n{{- $mounts_elasticsearch_templates := .Values.pod.mounts.elasticsearch_templates.elasticsearch_templates }}\n{{- $mounts_elasticsearch_templates_init := .Values.pod.mounts.elasticsearch_templates.init_container }}\n\n{{- $serviceAccountName := \"create-elasticsearch-templates\" }}\n{{ tuple $envAll \"elasticsearch_templates\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: create-elasticsearch-templates\n  labels:\n{{ tuple $envAll \"elasticsearch\" \"create-templates\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  backoffLimit: {{ .Values.jobs.create_elasticsearch_templates.backoffLimit }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"elasticsearch\" \"create-templates\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"create-elasticsearch-templates\" \"containerNames\" (list \"create-elasticsearch-templates\" \"init\") | include 
\"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"create_template\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"elasticsearch_templates\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: create-elasticsearch-templates\n{{ tuple $envAll \"elasticsearch_templates\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.elasticsearch_templates | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"create_template\" \"container\" \"create_elasticsearch_template\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: ELASTICSEARCH_ENDPOINT\n              value: {{ printf \"%s://%s\" (tuple \"elasticsearch\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\") (tuple \"elasticsearch\" \"internal\" . 
| include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\") }}\n{{- if .Values.manifests.certificates }}\n            - name: CACERT_OPTION\n              value: \"--cacert /etc/elasticsearch/certs/ca.crt\"\n{{- end }}\n            - name: ELASTICSEARCH_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_USERNAME\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_PASSWORD\n          command:\n            - /tmp/create_template.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: elasticsearch-bin\n              mountPath: /tmp/create_template.sh\n              subPath: create_template.sh\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal \"path\" \"/etc/elasticsearch/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_elasticsearch_templates.volumeMounts }}{{ toYaml $mounts_elasticsearch_templates.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: elasticsearch-bin\n          configMap:\n            name: elasticsearch-bin\n            defaultMode: 0555\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_elasticsearch_templates.volumes }}{{ toYaml $mounts_elasticsearch_templates.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"elasticsearch\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/job-s3-bucket.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and (.Values.manifests.job_s3_bucket) (.Values.conf.elasticsearch.snapshots.enabled) }}\n{{- $esBucket := .Values.conf.elasticsearch.snapshots.bucket }}\n{{- $s3BucketJob := dict \"envAll\" . \"serviceName\" \"elasticsearch\" \"s3Bucket\" $esBucket -}}\n{{- if .Values.manifests.certificates }}\n{{- $_ := set $s3BucketJob \"tlsCertificateSecret\" .Values.secrets.tls.elasticsearch.elasticsearch.internal -}}\n{{- $_ := set $s3BucketJob \"tlsCertificatePath\" \"/etc/elasticsearch/certs/ca.crt\" -}}\n{{- end }}\n{{ $s3BucketJob | include \"helm-toolkit.manifests.job_s3_bucket\" }}\n{{- end -}}\n"
  },
  {
    "path": "elasticsearch/templates/job-s3-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and (.Values.manifests.job_s3_user) (.Values.conf.elasticsearch.snapshots.enabled) }}\n{{- $s3UserJob := dict \"envAll\" . \"serviceName\" \"elasticsearch\" -}}\n{{ $s3UserJob | include \"helm-toolkit.manifests.job_s3_user\" }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/monitoring/prometheus/exporter-deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.deployment_exporter .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n\n{{- $serviceAccountName := \"prometheus-elasticsearch-exporter\" }}\n{{ tuple $envAll \"prometheus_elasticsearch_exporter\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: prometheus-elasticsearch-exporter\n  labels:\n{{ tuple $envAll \"prometheus-elasticsearch-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.prometheus_elasticsearch_exporter }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"prometheus-elasticsearch-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"prometheus-elasticsearch-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"prometheus-elasticsearch-exporter\" \"containerNames\" (list 
\"elasticsearch-exporter\" \"init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.exporter.node_selector_key }}: {{ .Values.labels.exporter.node_selector_value | quote }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.prometheus_elasticsearch_exporter.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"prometheus_elasticsearch_exporter\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: elasticsearch-exporter\n{{ tuple $envAll \"prometheus_elasticsearch_exporter\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.exporter | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"exporter\" \"container\" \"elasticsearch_exporter\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - \"elasticsearch_exporter\"\n            - '--es.uri=$(ELASTICSEARCH_URI)'\n            - '--web.telemetry-path={{ .Values.endpoints.prometheus_elasticsearch_exporter.path.default }}'\n            - '--web.listen-address=:{{ .Values.endpoints.prometheus_elasticsearch_exporter.port.metrics.default }}'\n            - '--es.timeout={{ .Values.conf.prometheus_elasticsearch_exporter.es.timeout }}'\n            - '--log.format={{ .Values.conf.prometheus_elasticsearch_exporter.log.format }}'\n            - '--log.level={{ .Values.conf.prometheus_elasticsearch_exporter.log.level }}'\n            {{- if .Values.conf.prometheus_elasticsearch_exporter.es.all }}\n            - 
'--es.all'\n            {{- end }}\n            {{- if .Values.conf.prometheus_elasticsearch_exporter.es.indices }}\n            - '--es.indices'\n            {{- end }}\n            {{- if .Values.conf.prometheus_elasticsearch_exporter.es.indices_settings }}\n            - '--es.indices_settings'\n            {{- end }}\n            {{- if .Values.conf.prometheus_elasticsearch_exporter.es.indices_mappings }}\n            - '--es.indices_mappings'\n            {{- end }}\n            {{- if .Values.conf.prometheus_elasticsearch_exporter.es.aliases }}\n            - '--es.aliases'\n            {{- end }}\n            {{- if .Values.conf.prometheus_elasticsearch_exporter.es.shards }}\n            - '--es.shards'\n            {{- end }}\n            {{- if .Values.conf.prometheus_elasticsearch_exporter.es.snapshots }}\n            - '--collector.snapshots'\n            {{- end }}\n            {{- if .Values.conf.prometheus_elasticsearch_exporter.es.cluster_settings }}\n            - '--collector.clustersettings'\n            {{- end }}\n            {{- if .Values.conf.prometheus_elasticsearch_exporter.es.slm }}\n            - '--collector.slm'\n            {{- end }}\n            {{- if .Values.conf.prometheus_elasticsearch_exporter.es.data_stream }}\n            - '--es.data_stream'\n            {{- end }}\n            {{- if .Values.manifests.certificates }}\n            - '--es.ca=/tmp/elasticsearch/certs/ca.crt'\n            {{- else }}\n            - '--es.ssl-skip-verify'\n            {{- end }}\n          env:\n            - name: ELASTICSEARCH_URI\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_URI\n          ports:\n            - name: metrics\n              containerPort: {{ tuple \"prometheus_elasticsearch_exporter\" \"internal\" \"metrics\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"prometheus_elasticsearch_exporter\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 20\n            periodSeconds: 10\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal \"path\" \"/tmp/elasticsearch/certs\" \"certs\" (tuple \"ca.crt\") | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/monitoring/prometheus/exporter-network-policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.network_policy_exporter .Values.monitoring.prometheus.enabled -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"prometheus-elasticsearch-exporter\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "elasticsearch/templates/monitoring/prometheus/exporter-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.service_exporter .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.elasticsearch_exporter }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"prometheus_elasticsearch_exporter\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  labels:\n{{ tuple $envAll \"prometheus-elasticsearch-exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{- if .Values.monitoring.prometheus.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n{{- end }}\nspec:\n  ports:\n  - name: metrics\n    port: {{ tuple \"prometheus_elasticsearch_exporter\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"prometheus-elasticsearch-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/network-policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"elasticsearch\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "elasticsearch/templates/object-bucket-claim.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and (.Values.manifests.object_bucket_claim) (.Values.conf.elasticsearch.snapshots.enabled) }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: \"elasticsearch-dependencies-objectbucket\"\n  namespace: {{ .Release.Namespace }}\nrules:\n  - apiGroups:\n      - \"objectbucket.io\"\n    verbs:\n      - get\n      - list\n    resources:\n      - objectbuckets\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: \"elasticsearch-dependencies-objectbucket\"\n  namespace: {{ .Release.Namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: \"elasticsearch-dependencies-objectbucket\"\nsubjects:\n  - kind: ServiceAccount\n    name: create-elasticsearch-templates\n    namespace: {{ .Release.Namespace }}\n  - kind: ServiceAccount\n    name: verify-repositories\n    namespace: {{ .Release.Namespace }}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: \"cluster-elasticsearch-dependencies-objectbucket\"\nrules:\n  - apiGroups:\n      - 'objectbucket.io'\n    resources:\n      - objectbuckets\n    verbs:\n      - get\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: \"cluster-elasticsearch-dependencies-objectbucket\"\nsubjects:\n  - kind: ServiceAccount\n    name: create-elasticsearch-templates\n    namespace: {{ .Release.Namespace }}\n 
 - kind: ServiceAccount\n    name: verify-repositories\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: \"cluster-elasticsearch-dependencies-objectbucket\"\n  apiGroup: rbac.authorization.k8s.io\n\n{{- range $bucket := .Values.storage.s3.buckets }}\n# When using this Rook CRD, not only bucket will be created,\n# but also a secret containing the credentials to access the bucket.\n---\napiVersion: objectbucket.io/v1alpha1\nkind: ObjectBucketClaim\nmetadata:\n  name: {{ $bucket.name }}\nspec:\n  bucketName: {{ $bucket.name }}\n  storageClassName: {{ $bucket.storage_class }}\n...\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "elasticsearch/templates/pod-helm-tests.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.helm_tests }}\n{{- $envAll := . }}\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n\n{{- $serviceAccountName := print .Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{.Release.Name}}-test\"\n  labels:\n{{ tuple $envAll \"elasticsearch\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"elasticsearch-test\" \"containerNames\" (list \"init\" \"elasticsearch-helm-tests\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n{{ dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  restartPolicy: Never\n  initContainers:\n{{ tuple $envAll \"tests\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: elasticsearch-helm-tests\n{{ tuple $envAll \"helm_tests\" | include 
\"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"helm_tests\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      command:\n        - /tmp/helm-tests.sh\n      env:\n        - name: ELASTICSEARCH_USERNAME\n          valueFrom:\n            secretKeyRef:\n              name: {{ $esUserSecret }}\n              key: ELASTICSEARCH_USERNAME\n        - name: ELASTICSEARCH_PASSWORD\n          valueFrom:\n            secretKeyRef:\n              name: {{ $esUserSecret }}\n              key: ELASTICSEARCH_PASSWORD\n        - name: ELASTICSEARCH_ENDPOINT\n          value: {{ printf \"%s://%s\" (tuple \"elasticsearch\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\") (tuple \"elasticsearch\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\") }}\n{{- if .Values.manifests.certificates }}\n        - name: CACERT_OPTION\n          value: \"--cacert /etc/elasticsearch/certs/ca.crt\"\n{{- end }}\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: elasticsearch-bin\n          mountPath: /tmp/helm-tests.sh\n          subPath: helm-tests.sh\n          readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal \"path\" \"/etc/elasticsearch/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 8 }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: elasticsearch-bin\n      configMap:\n        name: elasticsearch-bin\n        defaultMode: 0555\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/secret-elasticsearch.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_elasticsearch }}\n{{- $envAll := . }}\n{{- $secretName := index $envAll.Values.secrets.elasticsearch.user }}\n\n{{- $elasticsearch_user := .Values.endpoints.elasticsearch.auth.admin.username }}\n{{- $elasticsearch_password := .Values.endpoints.elasticsearch.auth.admin.password }}\n{{- $elasticsearch_host := tuple \"elasticsearch\" \"internal\" \"http\" $envAll | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n{{- $elasticsearch_scheme := tuple \"elasticsearch\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\n{{- $elasticsearch_uri := printf \"%s://%s:%s@%s\" $elasticsearch_scheme $elasticsearch_user $elasticsearch_password $elasticsearch_host }}\n{{- $elasticsearch_url := printf \"%s://%s\" $elasticsearch_scheme $elasticsearch_host }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  ELASTICSEARCH_USERNAME: {{ .Values.endpoints.elasticsearch.auth.admin.username | b64enc }}\n  ELASTICSEARCH_PASSWORD: {{ .Values.endpoints.elasticsearch.auth.admin.password | b64enc }}\n  ELASTICSEARCH_LOGGING_USERNAME: {{ .Values.endpoints.elasticsearch.auth.logging.username | b64enc }}\n  ELASTICSEARCH_LOGGING_PASSWORD: {{ .Values.endpoints.elasticsearch.auth.logging.password | b64enc }}\n  ELASTICSEARCH_URI: {{ $elasticsearch_uri | b64enc }}\n  ELASTICSEARCH_URL: {{ 
$elasticsearch_url | b64enc }}\n  BIND_DN: {{ .Values.endpoints.ldap.auth.admin.bind | b64enc }}\n  BIND_PASSWORD: {{ .Values.endpoints.ldap.auth.admin.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/secret-environment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_environment .Values.pod.env.secrets }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"env-secret\" | quote }}\ntype: Opaque\ndata:\n  {{- range $key, $value := .Values.pod.env.secrets }}\n  {{ $key | upper }}: {{ $value | b64enc }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"elasticsearch\" \"backendService\" \"elasticsearch\" ) }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/secret-s3-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_s3 }}\n{{ include \"helm-toolkit.snippets.rgw_s3_secret_creds\" . }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/service-data.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_data }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"elasticsearch\" \"data\" $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: transport\n    port: {{ tuple \"elasticsearch\" \"internal\" \"discovery\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"elasticsearch\" \"data\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/service-discovery.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_discovery }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"elasticsearch\" \"discovery\" $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: transport\n    port: {{ tuple \"elasticsearch\" \"internal\" \"discovery\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"elasticsearch\" \"master\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/service-gateway.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.network.remote_clustering.enabled }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"elasticsearch\" \"gateway\" $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: transport\n    port: {{ tuple \"elasticsearch\" \"internal\" \"discovery\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    nodePort: {{ .Values.network.remote_clustering.node_port.port }}\n  selector:\n{{ tuple $envAll \"elasticsearch\" \"gateway\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  type: NodePort\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/service-ingress-elasticsearch.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress .Values.network.elasticsearch.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"elasticsearch\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/service-logging.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_logging }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"elasticsearch\" \"default\" $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: {{ tuple \"elasticsearch\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\n    port: {{ tuple \"elasticsearch\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    targetPort: {{ tuple \"elasticsearch\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{- if .Values.network.elasticsearch.node_port.enabled }}\n    nodePort: {{ .Values.network.elasticsearch.node_port.port }}\n    {{- end }}\n  selector:\n{{ tuple $envAll \"elasticsearch\" \"client\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{- if .Values.network.elasticsearch.node_port.enabled }}\n  type: NodePort\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/statefulset-data.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.statefulset_data }}\n{{- $envAll := . }}\n\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n{{- $s3UserSecret := .Values.secrets.rgw.elasticsearch }}\n\n{{- $mounts_elasticsearch := .Values.pod.mounts.elasticsearch.elasticsearch }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"elasticsearch-data\" }}\n{{ tuple $envAll \"elasticsearch_data\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: elasticsearch-data\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"elasticsearch\" \"data\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_statefulset\" | indent 2 }}\n  serviceName: {{ tuple \"elasticsearch\" \"data\" . 
| include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  podManagementPolicy: \"Parallel\"\n  replicas: {{ .Values.pod.replicas.data }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"elasticsearch\" \"data\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"elasticsearch\" \"data\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"elasticsearch-data\" \"containerNames\" (list \"elasticsearch-data\" \"init\" \"memory-map-increase\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin-elasticsearch.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc-elasticsearch.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{- if and .Values.manifests.secret_s3 .Values.conf.elasticsearch.snapshots.enabled }}\n        secret-s3-user-hash: {{ tuple \"secret-s3-user.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{- end }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"data\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"elasticsearch\" \"data\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.data.node_selector_key }}: {{ .Values.labels.data.node_selector_value | quote }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.data.timeout | default \"600\" }}\n      initContainers:\n{{ tuple $envAll \"elasticsearch_data\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n        - name: memory-map-increase\n{{ tuple $envAll \"memory_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.data | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"data\" \"container\" \"memory_map_increase\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - sysctl\n            - -w\n            - vm.max_map_count={{ .Values.conf.init.max_map_count }}\n        - name: elasticsearch-perms\n{{ tuple $envAll \"elasticsearch\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.data | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"data\" \"container\" \"elasticsearch_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - \"1000:1000\"\n            - {{ .Values.conf.elasticsearch.config.path.data }}\n          volumeMounts:\n            - 
name: storage\n              mountPath: {{ .Values.conf.elasticsearch.config.path.data }}\n      containers:\n        - name: elasticsearch-data\n{{ tuple $envAll \"elasticsearch\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.data | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"data\" \"container\" \"elasticsearch_data\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/elasticsearch.sh\n            - start_data_node\n          ports:\n            - name: transport\n              containerPort: {{ tuple \"elasticsearch\" \"internal\" \"discovery\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"elasticsearch\" \"internal\" \"discovery\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 20\n            periodSeconds: 10\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: ELASTICSEARCH_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_USERNAME\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_PASSWORD\n            - name: ELASTICSEARCH_ENDPOINT\n              value: {{ printf \"%s://%s\" (tuple \"elasticsearch\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\") (tuple \"elasticsearch\" \"internal\" . 
| include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\") }}\n{{- if .Values.manifests.certificates }}\n            - name: CACERT_OPTION\n              value: \"--cacert /usr/share/elasticsearch/config/ca.crt\"\n            - name: JAVA_KEYSTORE_CERT_PATH\n              value: \"/usr/share/elasticsearch/config/ca.crt\"\n{{- end }}\n            - name: node.roles\n              value: \"[data]\"\n            - name: HTTP_ENABLE\n              value: \"false\"\n            - name: ES_JAVA_OPTS\n              value: \"{{ .Values.conf.elasticsearch.env.java_opts.data }}\"\n            - name: DISCOVERY_SERVICE\n              value: {{ tuple \"elasticsearch\" \"discovery\" $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n{{- if .Values.conf.elasticsearch.snapshots.enabled }}\n{{- if .Values.manifests.object_bucket_claim }}\n{{- include \"helm-toolkit.snippets.rgw_s3_bucket_user_env_vars_rook\" . | indent 12 }}\n{{- else }}\n{{- include \"helm-toolkit.snippets.rgw_s3_user_env_vars\" . 
| indent 12 }}\n{{- end }}\n{{- end }}\n{{- if .Values.pod.env.data }}\n{{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.pod.env.data | indent 12 }}\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: elasticsearch-logs\n              mountPath: {{ .Values.conf.elasticsearch.config.path.logs }}\n            - name: elasticsearch-bin\n              mountPath: /tmp/elasticsearch.sh\n              subPath: elasticsearch.sh\n              readOnly: true\n            - name: elasticsearch-etc\n              mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\n              subPath: elasticsearch.yml\n              readOnly: true\n            - name: elasticsearch-etc\n              mountPath: /usr/share/elasticsearch/config/log4j2.properties\n              subPath: log4j2.properties\n              readOnly: true\n            - name: elasticsearch-etc\n              mountPath: /usr/share/elasticsearch/config/jvm.options\n              subPath: jvm.options\n              readOnly: true\n            - name: storage\n              mountPath: {{ .Values.conf.elasticsearch.config.path.data }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal \"path\" \"/usr/share/elasticsearch/config\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_elasticsearch.volumeMounts }}{{ toYaml $mounts_elasticsearch.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: elasticsearch-logs\n          emptyDir: {}\n        - name: elasticsearch-bin\n          configMap:\n            name: elasticsearch-bin\n            defaultMode: 0555\n        - name: elasticsearch-etc\n          secret:\n            secretName: elasticsearch-etc\n            defaultMode: 0444\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" 
.Values.secrets.tls.elasticsearch.elasticsearch.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_elasticsearch.volumes }}{{ toYaml $mounts_elasticsearch.volumes | indent 8 }}{{ end }}\n{{- if not .Values.storage.data.enabled }}\n        - name: storage\n          emptyDir: {}\n{{- else }}\n  volumeClaimTemplates:\n    - metadata:\n        name: storage\n      spec:\n        accessModes: {{ .Values.storage.data.pvc.access_mode }}\n        resources:\n          requests:\n            storage: {{ .Values.storage.data.requests.storage  }}\n        storageClassName: {{ .Values.storage.data.storage_class }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/templates/statefulset-master.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.statefulset_master }}\n{{- $envAll := . }}\n\n{{- $mounts_elasticsearch := .Values.pod.mounts.elasticsearch.elasticsearch }}\n\n{{- $serviceAccountName := \"elasticsearch-master\" }}\n{{ tuple $envAll \"elasticsearch_master\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: elasticsearch-master\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"elasticsearch\" \"master\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: {{ tuple \"elasticsearch\" \"discovery\" . 
| include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  podManagementPolicy: \"Parallel\"\n  replicas: {{ .Values.pod.replicas.master }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"elasticsearch\" \"master\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_statefulset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"elasticsearch\" \"master\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin-elasticsearch.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc-elasticsearch.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{- if and .Values.manifests.secret_s3 .Values.conf.elasticsearch.snapshots.enabled }}\n        secret-s3-user-hash: {{ tuple \"secret-s3-user.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{- end }}\n{{ dict \"envAll\" $envAll \"podName\" \"elasticsearch-master\" \"containerNames\" (list \"elasticsearch-master\" \"init\" \"memory-map-increase\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"master\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"elasticsearch\" \"master\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.master.timeout | default \"600\" }}\n      nodeSelector:\n        {{ .Values.labels.master.node_selector_key }}: {{ .Values.labels.master.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"elasticsearch_master\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n        - name: memory-map-increase\n{{ tuple $envAll \"memory_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.master | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"master\" \"container\" \"memory_map_increase\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n          - sysctl\n          - -w\n          - vm.max_map_count={{ .Values.conf.init.max_map_count }}\n        - name: elasticsearch-perms\n{{ tuple $envAll \"elasticsearch\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.master | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"master\" \"container\" \"elasticsearch_perms\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - \"1000:1000\"\n            - {{ .Values.conf.elasticsearch.config.path.data }}\n          volumeMounts:\n            - name: storage\n              mountPath: {{ .Values.conf.elasticsearch.config.path.data }}\n      containers:\n        - name: elasticsearch-master\n{{ dict \"envAll\" $envAll \"application\" \"master\" \"container\" \"elasticsearch_master\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ tuple $envAll \"elasticsearch\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.master | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/elasticsearch.sh\n            - start_master_node\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/elasticsearch.sh\n                  - stop\n          ports:\n            - name: transport\n              containerPort: {{ tuple \"elasticsearch\" \"internal\" \"discovery\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"elasticsearch\" \"internal\" \"discovery\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 20\n            periodSeconds: 10\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: node.roles\n              value: \"[master]\"\n            - name: HTTP_ENABLE\n              value: \"false\"\n            - name: DISCOVERY_SERVICE\n              value: {{ tuple \"elasticsearch\" \"discovery\" $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n            - name: ES_JAVA_OPTS\n              value: \"{{ .Values.conf.elasticsearch.env.java_opts.master }}\"\n{{- if .Values.manifests.certificates }}\n            - name: JAVA_KEYSTORE_CERT_PATH\n              value: \"/usr/share/elasticsearch/config/ca.crt\"\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.elasticsearch.user }}\n                  key: ELASTICSEARCH_PASSWORD\n{{- end }}\n{{- if .Values.conf.elasticsearch.snapshots.enabled }}\n{{- if .Values.manifests.object_bucket_claim }}\n{{- include \"helm-toolkit.snippets.rgw_s3_bucket_user_env_vars_rook\" . | indent 12 }}\n{{- else }}\n{{- include \"helm-toolkit.snippets.rgw_s3_user_env_vars\" . 
| indent 12 }}\n{{- end }}\n{{- end }}\n{{- if .Values.pod.env.master }}\n{{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.pod.env.master | indent 12 }}\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: elasticsearch-logs\n              mountPath: {{ .Values.conf.elasticsearch.config.path.logs }}\n            - name: elasticsearch-bin\n              mountPath: /tmp/elasticsearch.sh\n              subPath: elasticsearch.sh\n              readOnly: true\n            - name: elasticsearch-etc\n              mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\n              subPath: elasticsearch.yml\n              readOnly: true\n            - name: elasticsearch-etc\n              mountPath: /usr/share/elasticsearch/config/log4j2.properties\n              subPath: log4j2.properties\n              readOnly: true\n            - name: elasticsearch-etc\n              mountPath: /usr/share/elasticsearch/config/jvm.options\n              subPath: jvm.options\n              readOnly: true\n            - name: storage\n              mountPath: {{ .Values.conf.elasticsearch.config.path.data }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.elasticsearch.elasticsearch.internal \"path\" \"/usr/share/elasticsearch/config\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_elasticsearch.volumeMounts }}{{ toYaml $mounts_elasticsearch.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: elasticsearch-logs\n          emptyDir: {}\n        - name: elasticsearch-bin\n          configMap:\n            name: elasticsearch-bin\n            defaultMode: 0555\n        - name: elasticsearch-etc\n          secret:\n            secretName: elasticsearch-etc\n            defaultMode: 0444\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" 
.Values.secrets.tls.elasticsearch.elasticsearch.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_elasticsearch.volumes }}{{ toYaml $mounts_elasticsearch.volumes | indent 8 }}{{ end }}\n{{- if not .Values.storage.master.enabled }}\n        - name: storage\n          emptyDir: {}\n{{- else }}\n  volumeClaimTemplates:\n    - metadata:\n        name: storage\n      spec:\n        accessModes: {{ .Values.storage.master.pvc.access_mode }}\n        resources:\n          requests:\n            storage: {{ .Values.storage.master.requests.storage  }}\n        storageClassName: {{ .Values.storage.master.storage_class }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "elasticsearch/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for elasticsearch\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nimages:\n  tags:\n    apache_proxy: docker.io/library/httpd:2.4\n    memory_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    elasticsearch: docker.elastic.co/elasticsearch/elasticsearch:8.19.9\n    curator: docker.io/untergeek/curator:8.0.10\n    ceph_key_placement: quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407\n    s3_bucket: quay.io/airshipit/ceph-daemon:ubuntu_jammy_20.2.1-1-20260407\n    s3_user: quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407\n    helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    prometheus_elasticsearch_exporter: quay.io/prometheuscommunity/elasticsearch-exporter:v1.9.0\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    snapshot_repository: quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407\n    elasticsearch_templates: docker.io/linuxserver/yq:latest\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  client:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  data:\n    node_selector_key: openstack-control-plane\n    node_selector_value: 
enabled\n  exporter:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  master:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  gateway:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - elasticsearch-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    curator:\n      services:\n        - endpoint: internal\n          service: elasticsearch\n        - endpoint: data\n          service: elasticsearch\n        - endpoint: discovery\n          service: elasticsearch\n      jobs:\n        - elasticsearch-register-snapshot-repository\n    elasticsearch_client:\n      services:\n        - endpoint: discovery\n          service: elasticsearch\n      jobs: null\n    elasticsearch_gateway:\n      services:\n        - endpoint: discovery\n          service: elasticsearch\n    elasticsearch_data:\n      services:\n        - endpoint: internal\n          service: elasticsearch\n        - endpoint: discovery\n          service: elasticsearch\n      jobs: null\n    elasticsearch_master:\n      services: null\n      jobs: null\n    elasticsearch_templates:\n      services:\n        - endpoint: internal\n          service: elasticsearch\n      jobs:\n        - elasticsearch-s3-bucket\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    prometheus_elasticsearch_exporter:\n      services:\n        - endpoint: internal\n          service: elasticsearch\n    snapshot_repository:\n      services:\n        - endpoint: internal\n          service: elasticsearch\n      jobs:\n        
- elasticsearch-s3-bucket\n    verify_repositories:\n      services: null\n      jobs:\n        - create-elasticsearch-templates\n    s3_user:\n      services:\n        - endpoint: internal\n          service: ceph_object_store\n    s3_bucket:\n      jobs:\n        - elasticsearch-s3-user\n    tests:\n      services: null\n      jobs:\n        - create-elasticsearch-templates\n\npod:\n  env:\n    client: null\n    data: null\n    master: null\n    gateway: null\n    secrets: null\n  security_context:\n    exporter:\n      pod:\n        runAsUser: 99\n      container:\n        elasticsearch_exporter:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    client:\n      pod:\n        runAsUser: 0\n      container:\n        memory_map_increase:\n          privileged: true\n          readOnlyRootFilesystem: true\n        apache_proxy:\n          readOnlyRootFilesystem: false\n        elasticsearch_client:\n          runAsUser: 1000\n          runAsGroup: 1000\n          readOnlyRootFilesystem: false\n    master:\n      pod:\n        runAsUser: 0\n      container:\n        memory_map_increase:\n          privileged: true\n          readOnlyRootFilesystem: true\n        elasticsearch_perms:\n          readOnlyRootFilesystem: true\n        elasticsearch_master:\n          runAsUser: 1000\n          runAsGroup: 1000\n          readOnlyRootFilesystem: false\n    snapshot_repository:\n      pod:\n        runAsUser: 0\n      container:\n        register_snapshot_repository:\n          readOnlyRootFilesystem: true\n    test:\n      pod:\n        runAsUser: 0\n      container:\n        helm_test:\n          readOnlyRootFilesystem: true\n    data:\n      pod:\n        runAsUser: 0\n      container:\n        memory_map_increase:\n          privileged: true\n          readOnlyRootFilesystem: true\n        elasticsearch_perms:\n          readOnlyRootFilesystem: true\n        elasticsearch_data:\n          runAsUser: 1000\n          runAsGroup: 
1000\n          # NOTE: This was changed from true to false to account for\n          # recovery scenarios when the data pods are unexpectedly lost due to\n          # node outages and shard/index recovery is required\n          readOnlyRootFilesystem: false\n    gateway:\n      pod:\n        runAsUser: 0\n      container:\n        memory_map_increase:\n          privileged: true\n          readOnlyRootFilesystem: true\n        apache_proxy:\n          readOnlyRootFilesystem: false\n        elasticsearch_gateway:\n          runAsUser: 1000\n          runAsGroup: 1000\n          readOnlyRootFilesystem: false\n    curator:\n      pod:\n        runAsUser: 0\n      container:\n        curator:\n          readOnlyRootFilesystem: true\n    verify_repositories:\n      pod:\n        runAsUser: 0\n      container:\n        elasticsearch_verify_repositories:\n          readOnlyRootFilesystem: true\n    create_template:\n      pod:\n        runAsUser: 0\n      container:\n        create_elasticsearch_template:\n          readOnlyRootFilesystem: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  replicas:\n    master: 3\n    data: 3\n    client: 3\n    gateway: 3\n  lifecycle:\n    upgrades:\n      statefulsets:\n        pod_replacement_strategy: RollingUpdate\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    termination_grace_period:\n      master:\n        timeout: 600\n      data:\n        timeout: 1200\n      client:\n        timeout: 600\n      prometheus_elasticsearch_exporter:\n        timeout: 600\n  probes:\n    elasticsearch:\n      elasticsearch-client:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            timeoutSeconds: 
30\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 60\n            periodSeconds: 10\n  mounts:\n    elasticsearch:\n      elasticsearch:\n    elasticsearch_templates:\n      elasticsearch_templates:\n  resources:\n    enabled: false\n    apache_proxy:\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n    client:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    master:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    data:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    prometheus_elasticsearch_exporter:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    gateway:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      curator:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      elasticsearch_templates:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      snapshot_repository:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      storage_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        
limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      s3_bucket:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      s3_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nnetwork_policy:\n  elasticsearch:\n    ingress:\n      - {}\n    egress:\n      - {}\n  prometheus-elasticsearch-exporter:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nsecrets:\n  rgw:\n    elasticsearch: elasticsearch-s3-user-creds\n  elasticsearch:\n    user: elasticsearch-user-secrets\n  oci_image_registry:\n    elasticsearch: elasticsearch-oci-image-registry-key\n  tls:\n    elasticsearch:\n      elasticsearch:\n        public: elasticsearch-tls-public\n        internal: elasticsearch-tls-api\n\njobs:\n  curator:\n    cron: \"* */6 * * *\"\n    history:\n      success: 3\n      failed: 1\n  verify_repositories:\n    cron: \"*/30 * * * *\"\n    history:\n      success: 3\n      failed: 1\n  create_elasticsearch_templates:\n    backoffLimit: 6\n\nconf:\n  httpd: |\n    ServerRoot \"/usr/local/apache2\"\n\n    Listen 80\n\n    LoadModule allowmethods_module modules/mod_allowmethods.so\n    LoadModule mpm_event_module modules/mod_mpm_event.so\n    LoadModule authn_file_module modules/mod_authn_file.so\n    LoadModule authn_core_module modules/mod_authn_core.so\n    LoadModule authz_host_module modules/mod_authz_host.so\n    LoadModule authz_groupfile_module modules/mod_authz_groupfile.so\n    LoadModule authz_user_module modules/mod_authz_user.so\n    LoadModule authz_core_module modules/mod_authz_core.so\n    LoadModule access_compat_module modules/mod_access_compat.so\n    LoadModule auth_basic_module 
modules/mod_auth_basic.so\n    LoadModule ldap_module modules/mod_ldap.so\n    LoadModule authnz_ldap_module modules/mod_authnz_ldap.so\n    LoadModule reqtimeout_module modules/mod_reqtimeout.so\n    LoadModule filter_module modules/mod_filter.so\n    LoadModule proxy_html_module modules/mod_proxy_html.so\n    LoadModule log_config_module modules/mod_log_config.so\n    LoadModule env_module modules/mod_env.so\n    LoadModule headers_module modules/mod_headers.so\n    LoadModule setenvif_module modules/mod_setenvif.so\n    LoadModule version_module modules/mod_version.so\n    LoadModule proxy_module modules/mod_proxy.so\n    LoadModule proxy_connect_module modules/mod_proxy_connect.so\n    LoadModule proxy_http_module modules/mod_proxy_http.so\n    LoadModule proxy_balancer_module modules/mod_proxy_balancer.so\n    LoadModule slotmem_shm_module modules/mod_slotmem_shm.so\n    LoadModule slotmem_plain_module modules/mod_slotmem_plain.so\n    LoadModule unixd_module modules/mod_unixd.so\n    LoadModule status_module modules/mod_status.so\n    LoadModule autoindex_module modules/mod_autoindex.so\n    LoadModule rewrite_module modules/mod_rewrite.so\n\n    <IfModule unixd_module>\n    User daemon\n    Group daemon\n    </IfModule>\n\n    <Directory />\n        AllowOverride none\n        Require all denied\n    </Directory>\n\n    <Files \".ht*\">\n        Require all denied\n    </Files>\n\n    ErrorLog /dev/stderr\n\n    LogLevel warn\n\n    <IfModule log_config_module>\n        LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" combined\n        LogFormat \"%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" proxy\n        LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b\" common\n\n        <IfModule logio_module>\n          LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\" %I %O\" combinedio\n        </IfModule>\n\n        SetEnvIf X-Forwarded-For 
\"^.*\\..*\\..*\\..*\" forwarded\n        CustomLog /dev/stdout common\n        CustomLog /dev/stdout combined\n        CustomLog /dev/stdout proxy env=forwarded\n    </IfModule>\n\n    <Directory \"/usr/local/apache2/cgi-bin\">\n        AllowOverride None\n        Options None\n        Require all granted\n    </Directory>\n\n    <IfModule headers_module>\n        RequestHeader unset Proxy early\n    </IfModule>\n\n    <IfModule proxy_html_module>\n    Include conf/extra/proxy-html.conf\n    </IfModule>\n\n    <VirtualHost *:80>\n      <Location />\n          ProxyPass http://localhost:{{ tuple \"elasticsearch\" \"internal\" \"client\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/\n          ProxyPassReverse http://localhost:{{ tuple \"elasticsearch\" \"internal\" \"client\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/\n          AuthName \"Elasticsearch\"\n          AuthType Basic\n          AuthBasicProvider file ldap\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}\n          AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}\n          AuthLDAPURL {{ tuple \"ldap\" \"default\" \"ldap\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | quote }}\n          Require valid-user\n      </Location>\n\n      # Restrict access to the Elasticsearch Update By Query API Endpoint to prevent modification of indexed documents\n      <Location /*/_update_by_query*>\n          Require all denied\n      </Location>\n      # Restrict access to the Elasticsearch Delete By Query API Endpoint to prevent deletion of indexed documents\n      <Location /*/_delete_by_query*>\n          Require all denied\n      </Location>\n    </VirtualHost>\n  log4j2: |\n    status = error\n    appender.console.type = Console\n    appender.console.name = console\n    appender.console.layout.type = PatternLayout\n    appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker%m%n\n    rootLogger.level = info\n    rootLogger.appenderRef.console.ref = console\n  jvm_options: |\n    -Xms1g\n    -Xmx1g\n    -Des.networkaddress.cache.ttl=60\n    -Des.networkaddress.cache.negative.ttl=10\n    -XX:+AlwaysPreTouch\n    -Xss1m\n    -Djava.awt.headless=true\n    -Dfile.encoding=UTF-8\n    -Djna.nosys=true\n    -XX:-OmitStackTraceInFastThrow\n    -Dio.netty.noUnsafe=true\n    -Dio.netty.noKeySetOptimization=true\n    -Dio.netty.recycler.maxCapacityPerThread=0\n    -Dlog4j.shutdownHookEnabled=false\n    -Dlog4j2.disable.jmx=true\n    -Djava.io.tmpdir=${ES_TMPDIR}\n    {{- if .Values.manifests.certificates }}\n    -Djavax.net.ssl.trustStore=/usr/share/elasticsearch/config/elasticsearch-java-truststore\n    -Djavax.net.ssl.trustStorePassword={{ .Values.endpoints.elasticsearch.auth.admin.password }}\n    {{- end }}\n    -XX:+HeapDumpOnOutOfMemoryError\n    -XX:HeapDumpPath=data\n    -XX:ErrorFile=/usr/share/elasticsearch/logs/hs_err_pid%p.log\n    8:-XX:+PrintGCDetails\n    8:-XX:+PrintGCDateStamps\n    8:-XX:+PrintTenuringDistribution\n    8:-XX:+PrintGCApplicationStoppedTime\n    8:-Xloggc:logs/gc.log\n    8:-XX:+UseGCLogFileRotation\n    
8:-XX:NumberOfGCLogFiles=32\n    8:-XX:GCLogFileSize=64m\n    8-13:-XX:+UseConcMarkSweepGC\n    8-13:-XX:CMSInitiatingOccupancyFraction=75\n    8-13:-XX:+UseCMSInitiatingOccupancyOnly\n    9-:-Xlog:gc*,gc+age=trace,safepoint:file=/usr/share/elasticsearch/logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m\n    9-:-Djava.locale.providers=COMPAT\n    10-:-XX:UseAVX=2\n  init:\n    max_map_count: 262144\n  ceph:\n    admin_keyring: null\n  curator:\n    executable: /curator/curator\n    action_file: {}\n      # Remember, leave a key empty if there is no value.  None will be a string,\n      # not a Python \"NoneType\"\n      #\n      # Also remember that all examples have 'disable_action' set to True.  If you\n      # want to use this action as a template, be sure to set this to False after\n      # copying it.\n      #\n      # NOTE(srwilkers): The list of actions below is kept empty, and should be\n      # driven purely by overrides.  As these items are injected as pure YAML,\n      # the desired configuration should include all fields as to avoid unwanted\n      # merges with a set of dummy default values. 
The supplied values can be\n      # used as an example\n      # actions:\n        # 1:\n        #   action: delete_indices\n        #   description: >-\n        #     \"Delete indices older than 7 days\"\n        #   options:\n        #     timeout_override:\n        #     continue_if_exception: False\n        #     ignore_empty_list: True\n        #     disable_action: True\n        #   filters:\n        #   - filtertype: pattern\n        #     kind: prefix\n        #     value: logstash-\n        #   - filtertype: age\n        #     source: name\n        #     direction: older\n        #     timestring: '%Y.%m.%d'\n        #     unit: days\n        #     unit_count: 7\n        # 2:\n        #   action: delete_indices\n        #   description: >-\n        #     \"Delete indices by age if available disk space is\n        #      less than 80% total disk\"\n        #   options:\n        #     timeout_override: 600\n        #     continue_if_exception: False\n        #     ignore_empty_list: True\n        #     disable_action: True\n        #   filters:\n        #   - filtertype: pattern\n        #     kind: prefix\n        #     value: logstash-\n        #   - filtertype: space\n        #     source: creation_date\n        #     use_age: True\n        #     # This space assumes the default PVC size of 5Gi times three data\n        #     # replicas. 
This must be adjusted if changed due to Curator being\n        #     # unable to calculate percentages of total disk space\n        #     disk_space: 12\n        # 3:\n        #   action: snapshot\n        #   description: >-\n        #     \"Snapshot indices older than one day\"\n        #   options:\n        #     repository: logstash_snapshots\n        #     # Leaving this blank results in the default name format\n        #     name:\n        #     wait_for_completion: True\n        #     max_wait: 3600\n        #     wait_interval: 10\n        #     timeout_override: 600\n        #     ignore_empty_list: True\n        #     continue_if_exception: False\n        #     disable_action: True\n        #   filters:\n        #   - filtertype: age\n        #     source: name\n        #     direction: older\n        #     timestring: '%Y.%m.%d'\n        #     unit: days\n        #     unit_count: 1\n        # 4:\n        #   action: delete_snapshots\n        #   description: >-\n        #     \"Delete snapshots older than 30 days\"\n        #   options:\n        #     repository: logstash_snapshots\n        #     disable_action: True\n        #     timeout_override: 600\n        #     ignore_empty_list: True\n        #   filters:\n        #   - filtertype: pattern\n        #     kind: prefix\n        #     value: curator-\n        #     exclude:\n        #   - filtertype: age\n        #     source: creation_date\n        #     direction: older\n        #     unit: days\n        #     unit_count: 30\n    config:\n      # Remember, leave a key empty if there is no value.  
None will be a string,\n      # not a Python \"NoneType\"\n      elasticsearch:\n        client:\n          hosts: ${ELASTICSEARCH_URL}\n          request_timeout: 60\n        other_settings:\n          username: ${ELASTICSEARCH_USERNAME}\n          password: ${ELASTICSEARCH_PASSWORD}\n\n      logging:\n        loglevel: INFO\n        logformat: json\n        blacklist: ['elastic_transport', 'urllib3']\n  elasticsearch:\n    config:\n      xpack:\n        security:\n          enabled: false\n      bootstrap:\n        # As far as we run the pod as non-root, we can't make locking memory unlimited.\n        # configure the memory locking limits on host itself of disable swap completely.\n        memory_lock: false\n      cluster:\n        name: elasticsearch\n      discovery:\n        # NOTE(srwilkers): This gets configured dynamically via endpoint lookups\n        seed_hosts: null\n      network:\n        host: 0.0.0.0\n      s3:\n        client: {}\n      path:\n        data: /data\n        logs: /logs\n    snapshots:\n      enabled: false\n    env:\n      java_opts:\n        client: \"-Xms256m -Xmx256m\"\n        data: \"-Xms256m -Xmx256m\"\n        master: \"-Xms256m -Xmx256m\"\n  prometheus_elasticsearch_exporter:\n    es:\n      timeout: 30s\n      all: true\n      indices: true\n      indices_settings: true\n      indices_mappings: true\n      aliases: false\n      shards: true\n      snapshots: true\n      cluster_settings: true\n      slm: true\n      data_stream: false\n    log:\n      format: logfmt\n      level: info\n\n  api_objects: {}\n    # Fill this map with API objects to create once Elasticsearch is deployed\n    # name: # This name can be completely arbitrary\n    #   method: # Defaults to PUT\n    #   endpoint: # Path for the request\n    #   body: # Body of the request in yaml (Converted to Json in Template)\n    # Example: ILM Policy\n    # ilm_policy:\n    #   endpoint: _ilm/policy/delete_all_indexes\n    #   body:\n    #     policy:\n    #     
  phases:\n    #         delete:\n    #           min_age: 14d\n    #           actions:\n    #             delete: {}\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      elasticsearch:\n        username: elasticsearch\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  elasticsearch:\n    name: elasticsearch\n    namespace: null\n    auth:\n      admin:\n        username: admin\n        password: changeme\n      logging:\n        username: remote\n        password: changeme\n    hosts:\n      data: elasticsearch-data\n      default: elasticsearch-logging\n      discovery: elasticsearch-discovery\n      gateway: elasticsaerch-gateway\n      public: elasticsearch\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n      gateway: tcp\n    port:\n      client:\n        default: 9200\n      http:\n        default: 80\n      discovery:\n        default: 9300\n  prometheus_elasticsearch_exporter:\n    namespace: null\n    hosts:\n      default: elasticsearch-exporter\n    host_fqdn_override:\n      default: null\n    path:\n      default: /metrics\n    scheme:\n      default: 'http'\n    port:\n      metrics:\n        default: 9108\n  ldap:\n    hosts:\n      default: ldap\n    auth:\n      admin:\n        bind: \"cn=admin,dc=cluster,dc=local\"\n        password: password\n    host_fqdn_override:\n      default: null\n    path:\n      default: \"/ou=People,dc=cluster,dc=local\"\n    scheme:\n      
default: ldap\n    port:\n      ldap:\n        default: 389\n  ceph_object_store:\n    name: radosgw\n    namespace: null\n    hosts:\n      default: ceph-rgw\n      public: radosgw\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      api:\n        default: 8088\n        public: 80\n\nmonitoring:\n  prometheus:\n    enabled: false\n    elasticsearch_exporter:\n      scrape: true\n\nnetwork:\n  elasticsearch:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    node_port:\n      enabled: false\n      port: 30920\n  remote_clustering:\n    enabled: false\n    node_port:\n      port: 30930\n\nstorage:\n  data:\n    enabled: true\n    pvc:\n      name: pvc-elastic\n      access_mode: [\"ReadWriteOnce\"]\n    requests:\n      storage: 5Gi\n    storage_class: general\n  master:\n    enabled: true\n    pvc:\n      name: pvc-elastic\n      access_mode: [\"ReadWriteOnce\"]\n    requests:\n      storage: 1Gi\n    storage_class: general\n  s3:\n    clients: {}\n    # These values configure the s3 clients section of elasticsearch.yml\n    # See: https://www.elastic.co/guide/en/elasticsearch/plugins/current/repository-s3-client.html\n    #   default:\n    #     auth:\n    #       # Values under auth are written to the Secret $client-s3-user-secret\n    #       # and the access & secret keys are added to the elasticsearch keystore\n    #       username: elasticsearch\n    #       access_key: \"elastic_access_key\"\n    #       secret_key: \"elastic_secret_key\"\n    #     settings:\n    #       # Configure Client Settings here (https://www.elastic.co/guide/en/elasticsearch/plugins/current/repository-s3-client.html)\n    #       # endpoint: Defaults to the ceph-rgw endpoint\n    #       # protocol: Defaults to 
http\n    #       path_style_access: true # Required for ceph-rgw S3 API\n    #     create_user: true # Attempt to create the user at the ceph_object_store endpoint\n    #   backup:\n    #     auth:\n    #       username: elasticsearch\n    #       access_key: \"backup_access_key\"\n    #       secret_key: \"backup_secret_key\"\n    #     settings:\n    #       endpoint: s3.example.com # Specify your own s3 endpoint (defaults to the ceph_object_store endpoint)\n    #       path_style_access: false\n    #     create_user: false\n    buckets: {}\n    # List of buckets to create (if required).\n    # (The client field references one of the clients defined above)\n    #   - name: elasticsearch-bucket\n    #     client: default\n    #     options: # list of extra options for s3cmd\n    #       - --region=\"default:osh-infra\"\n    #     # SSL connection option for s3cmd\n    #     ssl_connecton_option: --ca-certs={path to mounted ca.crt}\n    #   - name: backup-bucket\n    #     client: backup\n    #     options: # list of extra options for s3cmd\n    #       - --region=\"default:backup\"\n    #     # SSL connection option for s3cmd\n    #     ssl_connecton_option: --ca-certs={path to mounted ca.crt}\n\nmanifests:\n  certificates: false\n  configmap_bin_curator: false\n  configmap_bin_elasticsearch: true\n  configmap_etc_curator: false\n  configmap_etc_elasticsearch: true\n  configmap_etc_templates: true\n  cron_curator: false\n  cron_verify_repositories: true\n  deployment_client: true\n  ingress: true\n  job_elasticsearch_templates: true\n  job_image_repo_sync: true\n  job_snapshot_repository: true\n  job_s3_user: true\n  job_s3_bucket: true\n  helm_tests: true\n  secret_elasticsearch: true\n  secret_s3: true\n  monitoring:\n    prometheus:\n      configmap_bin_exporter: true\n      deployment_exporter: true\n      network_policy_exporter: false\n      service_exporter: true\n  network_policy: false\n  secret_ingress_tls: true\n  secret_registry: true\n  service_data: 
true\n  service_discovery: true\n  service_ingress: true\n  service_logging: true\n  statefulset_data: true\n  statefulset_master: true\n  object_bucket_claim: false\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "etcd/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v3.4.3\ndescription: OpenStack-Helm etcd\nname: etcd\nversion: 2025.2.0\nhome: https://etcd.io/\nicon: https://raw.githubusercontent.com/CloudCoreo/etcd-cluster/master/images/icon.png\nsources:\n  - https://github.com/etcd-io/etcd/\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "etcd/templates/bin/_etcd-db-compact.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\n\nexport ETCDCTL_API=3\n\n{{- if .Values.jobs.db_compact.command_timeout }}\nCOMMAND_TIMEOUT='--command-timeout={{ .Values.jobs.db_compact.command_timeout }}'\n{{- else }}\nCOMMAND_TIMEOUT=''\n{{- end }}\n\nENDPOINTS=$(etcdctl member list --endpoints=http://${ETCD_SERVICE_HOST}:${ETCD_SERVICE_PORT} ${COMMAND_TIMEOUT}| cut -d, -f5 | sed -e 's/ //g' | paste -sd ',')\n\netcdctl --endpoints=${ENDPOINTS} endpoint status --write-out=\"table\" ${COMMAND_TIMEOUT}\n\nrev=$(etcdctl --endpoints=http://${ETCD_SERVICE_HOST}:${ETCD_SERVICE_PORT} endpoint status --write-out=\"json\" ${COMMAND_TIMEOUT}| egrep -o '\"revision\":[0-9]*' | egrep -o '[0-9].*')\ncompact_result=$(etcdctl compact --physical=true --endpoints=${ENDPOINTS} $rev ${COMMAND_TIMEOUT} 2>&1 > /dev/null)\ncompact_res=$?\n\nif [ $compact_res -ne 0 ]; then\n    match_pattern=$(echo ${compact_result} | egrep '(mvcc: required revision has been compacted.*$)')\n    match_pattern_res=$?\n    if [ $match_pattern_res -eq 0 ]; then\n        exit 0\n    else\n        echo \"Failed to compact database: $compact_result\"\n        exit $compact_res\n    fi\nelse\n    etcdctl defrag --endpoints=${ENDPOINTS} ${COMMAND_TIMEOUT}\n    etcdctl --endpoints=${ENDPOINTS} endpoint status --write-out=\"table\" ${COMMAND_TIMEOUT}\nfi\n"
  },
  {
    "path": "etcd/templates/bin/_etcd-healthcheck.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\n\nexport ETCDCTL_API=3\n\nETCD_CLIENT_PORT={{ tuple \"etcd\" \"internal\" \"client\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\nDISCOVERY_DOMAIN={{ tuple \"etcd\" \"discovery\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n\netcdctl endpoint health --endpoints=${POD_NAME}.${DISCOVERY_DOMAIN}:${ETCD_CLIENT_PORT}\n"
  },
  {
    "path": "etcd/templates/bin/_etcd.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nactive_members_present() {\n  res=1\n  for endpoint in $(echo $ETCD_ENDPOINTS | tr ',' '\\n'); do\n      if etcdctl endpoint health --endpoints=$endpoint >/dev/null 2>&1; then\n          res=$?\n          if [ \"$res\" = 0 ]; then\n              break\n          fi\n      fi\n  done\n  echo $res\n}\n\nETCD_REPLICAS={{ .Values.pod.replicas.etcd }}\nPEER_PREFIX_NAME={{- printf \"%s-%s\" .Release.Name \"etcd\"  }}\nDISCOVERY_DOMAIN={{ tuple \"etcd\" \"discovery\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\nETCD_PEER_PORT=2380\nETCD_CLIENT_PORT={{ tuple \"etcd\" \"internal\" \"client\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\nETCD_PROTOCOL={{ tuple \"etcd\" \"internal\" \"client\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\nPEERS=\"${PEER_PREFIX_NAME}-0=${ETCD_PROTOCOL}://${PEER_PREFIX_NAME}-0.${DISCOVERY_DOMAIN}:${ETCD_PEER_PORT}\"\nETCD_ENDPOINTS=\"${ETCD_PROTOCOL}://${PEER_PREFIX_NAME}-0.${DISCOVERY_DOMAIN}:${ETCD_PEER_PORT}\"\nif [ ${ETCD_REPLICAS} -gt 1 ] ; then\n  for i in $(seq 1 $(( ETCD_REPLICAS - 1 ))); do\n    PEERS=\"$PEERS,${PEER_PREFIX_NAME}-${i}=${ETCD_PROTOCOL}://${PEER_PREFIX_NAME}-${i}.${DISCOVERY_DOMAIN}:${ETCD_PEER_PORT}\"\n    ETCD_ENDPOINTS=\"${ETCD_ENDPOINTS},${ETCD_PROTOCOL}://${PEER_PREFIX_NAME}-${i}.${DISCOVERY_DOMAIN}:${ETCD_PEER_PORT}\"\n  done\nfi\nADVERTISE_PEER_URL=\"${ETCD_PROTOCOL}://${HOSTNAME}.${DISCOVERY_DOMAIN}:${ETCD_PEER_PORT}\"\nADVERTISE_CLIENT_URL=\"${ETCD_PROTOCOL}://${HOSTNAME}.${DISCOVERY_DOMAIN}:${ETCD_CLIENT_PORT}\"\n\nETCD_INITIAL_CLUSTER_STATE=new\n\nif [ -z \"$(ls -A $ETCD_DATA_DIR)\" ]; then\n  echo \"State directory $ETCD_DATA_DIR is empty.\"\n  if [ $(active_members_present) -eq 0 ]; then\n      ETCD_INITIAL_CLUSTER_STATE=existing\n      member_id=$(etcdctl --endpoints=${ETCD_ENDPOINTS} member list | grep -w ${ADVERTISE_CLIENT_URL} | awk -F \",\" '{ print $1 }')\n      if [ -n \"$member_id\" ]; then\n          echo \"Current node is a member of cluster, member_id: ${member_id}\"\n          echo \"Rejoining...\"\n          echo \"Removing member from the cluster\"\n          etcdctl member remove \"$member_id\" --endpoints=${ETCD_ENDPOINTS}\n          etcdctl member add ${ADVERTISE_CLIENT_URL} --peer-urls=${ADVERTISE_PEER_URL} --endpoints=${ETCD_ENDPOINTS}\n      fi\n  else\n      echo \"Do not have active members. Starting initial cluster state.\"\n  fi\nfi\n\nexec etcd \\\n  --name ${HOSTNAME} \\\n  --listen-peer-urls ${ETCD_PROTOCOL}://0.0.0.0:${ETCD_PEER_PORT} \\\n  --listen-client-urls ${ETCD_PROTOCOL}://0.0.0.0:${ETCD_CLIENT_PORT} \\\n  --advertise-client-urls ${ADVERTISE_CLIENT_URL} \\\n  --initial-advertise-peer-urls ${ADVERTISE_PEER_URL} \\\n  --initial-cluster ${PEERS} \\\n  --initial-cluster-state ${ETCD_INITIAL_CLUSTER_STATE}\n"
  },
  {
    "path": "etcd/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $configMapBinName := printf \"%s-%s\" $envAll.Release.Name \"etcd-bin\"  }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ $configMapBinName }}\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  etcd.sh: |\n{{ tuple \"bin/_etcd.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- if .Values.manifests.cron_job_db_compact }}\n  etcd-db-compact.sh: |\n{{ tuple \"bin/_etcd-db-compact.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  etcd-healthcheck.sh: |\n{{ tuple \"bin/_etcd-healthcheck.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "etcd/templates/cron-job-db-compact.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_job_db_compact }}\n{{- $envAll := . }}\n\n{{- $configMapBinName := printf \"%s-%s\" $envAll.Release.Name \"etcd-bin\"  }}\n\n{{- $serviceAccountName := \"etcd-db-compact\" }}\n{{ tuple $envAll \"db_compact\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: etcd-db-compaction\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  schedule: {{ .Values.jobs.db_compact.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.db_compact.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.db_compact.history.failed }}\n  {{- if .Values.jobs.db_compact.starting_deadline }}\n  startingDeadlineSeconds: {{ .Values.jobs.db_compact.starting_deadline }}\n  {{- end }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"etcd\" \"db-compact\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"etcd\" \"db-compact\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n{{ dict \"envAll\" $envAll \"application\" \"etcd_db_compact\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n          
serviceAccountName: {{ $serviceAccountName }}\n          restartPolicy: OnFailure\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n          initContainers:\n{{ tuple $envAll \"db_compact\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          containers:\n            - name: etcd-db-compact\n{{ tuple $envAll \"etcd_db_compact\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_compact | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"etcd_db_compact\" \"container\" \"etcd_db_compact\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              command:\n                - /tmp/etcd-db-compact.sh\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - name: etcd-bin\n                  mountPath: /tmp/etcd-db-compact.sh\n                  subPath: etcd-db-compact.sh\n                  readOnly: true\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: etcd-bin\n              configMap:\n                name: {{ $configMapBinName | quote }}\n                defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "etcd/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "etcd/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"etcd\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "etcd/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "etcd/templates/service-discovery.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.service_discovery }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"etcd\" \"discovery\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: client\n      port: {{ tuple \"etcd\" \"internal\" \"client\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      protocol: TCP\n      targetPort: client\n    - name: peer\n      port: {{ tuple \"etcd_discovery\" \"internal\" \"client\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      protocol: TCP\n      targetPort: peer\n  publishNotReadyAddresses: true\n  clusterIP: None\n  selector:\n{{ tuple $envAll \"etcd\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "etcd/templates/service.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"etcd\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  sessionAffinity: ClientIP\n  ports:\n    - name: client\n      port: {{ tuple \"etcd\" \"internal\" \"client\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      protocol: TCP\n      targetPort: client\n    - name: peer\n      port: {{ tuple \"etcd_discovery\" \"internal\" \"client\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      protocol: TCP\n      targetPort: peer\n  selector:\n{{ tuple $envAll \"etcd\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "etcd/templates/statefulset.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- define \"etcdProbeTemplate\" }}\nexec:\n  command:\n    - /tmp/etcd-healthcheck.sh\n{{- end }}\n\n{{- if .Values.manifests.statefulset }}\n{{- $envAll := . }}\n\n{{- $rcControllerName := printf \"%s-%s\" $envAll.Release.Name \"etcd\"  }}\n{{- $configMapBinName := printf \"%s-%s\" $envAll.Release.Name \"etcd-bin\"  }}\n\n{{ tuple $envAll \"etcd\" $rcControllerName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: {{ $rcControllerName | quote }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"etcd\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  podManagementPolicy: \"Parallel\"\n  serviceName: \"{{ tuple \"etcd\" \"discovery\" . 
| include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\"\n  replicas: {{ .Values.pod.replicas.etcd }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"etcd\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"etcd\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"etcd\" \"containerNames\" (list \"init\" \"etcd\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $rcControllerName | quote }}\n{{ dict \"envAll\" $envAll \"application\" \"etcd\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"etcd\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.server.node_selector_key }}: {{ .Values.labels.server.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"etcd\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: etcd\n{{ tuple $envAll \"etcd\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"etcd\" \"container\" \"etcd\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"etcd\" \"container\" \"etcd\" \"type\" \"readiness\" \"probeTemplate\" (include \"etcdProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" . 
\"component\" \"etcd\" \"container\" \"etcd\" \"type\" \"liveness\" \"probeTemplate\" (include \"etcdProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          env:\n{{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.pod.env.etcd | indent 12 }}\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n          command:\n            - /tmp/etcd.sh\n          ports:\n            - containerPort: {{ tuple \"etcd\" \"internal\" \"client\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              name: client\n              protocol: TCP\n            - containerPort: {{ tuple \"etcd_discovery\" \"internal\" \"client\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              name: peer\n              protocol: TCP\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcd-bin\n              mountPath: /tmp/etcd.sh\n              subPath: etcd.sh\n              readOnly: true\n            - name: etcd-data\n              mountPath: /var/lib/etcd\n            - name: etcd-bin\n              mountPath: /tmp/etcd-healthcheck.sh\n              subPath: etcd-healthcheck.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: etcd-bin\n          configMap:\n            name: {{ $configMapBinName | quote }}\n            defaultMode: 0555\n        {{- if not .Values.volume.enabled }}\n        - name: etcd-data\n          emptyDir: {}\n        {{- end }}\n{{- if .Values.volume.enabled }}\n  volumeClaimTemplates:\n  - metadata:\n      name: etcd-data\n    spec:\n      accessModes: [ \"ReadWriteOnce\" ]\n      resources:\n        requests:\n          storage: {{ .Values.volume.size }}\n      {{- if ne .Values.volume.class_name \"default\" }}\n      
storageClassName: {{ .Values.volume.class_name }}\n      {{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "etcd/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for etcd.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nimages:\n  tags:\n    etcd: 'registry.k8s.io/etcd-amd64:3.4.3'\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n    etcd_db_compact: 'registry.k8s.io/etcd-amd64:3.4.3'\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  server:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - etcd-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    etcd:\n      jobs: null\n    db_compact:\n      services:\n        - endpoint: internal\n          service: etcd\n\npod:\n  env:\n    etcd:\n      ETCD_DATA_DIR: /var/lib/etcd\n      ETCD_INITIAL_CLUSTER_TOKEN: etcd-cluster-1\n  security_context:\n    etcd:\n      pod:\n        runAsUser: 65534\n      container:\n        etcd:\n          runAsUser: 0\n          
readOnlyRootFilesystem: false\n    etcd_db_compact:\n      pod:\n        runAsUser: 65534\n        runAsNonRoot: true\n        allowPrivilegeEscalation: false\n      container:\n        etcd_db_compact:\n          allowPrivilegeEscalation: false\n          capabilities:\n            drop:\n              - ALL\n  probes:\n    etcd:\n      etcd:\n        readiness:\n          enabled: True\n          params:\n            initialDelaySeconds: 5\n            periodSeconds: 10\n            timeoutSeconds: 1\n        liveness:\n          enabled: True\n          params:\n            initialDelaySeconds: 5\n            periodSeconds: 10\n            timeoutSeconds: 1\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  replicas:\n    etcd: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        pod_replacement_strategy: RollingUpdate\n        revision_history: 3\n        rolling_update:\n          max_surge: 3\n          max_unavailable: 1\n  resources:\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_compact:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n\nsecrets:\n  oci_image_registry:\n    etcd: etcd-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      etcd:\n        username: etcd\n        password: password\n    hosts:\n      
default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  etcd:\n    name: etcd\n    hosts:\n      default: etcd\n      discovery: etcd-discovery\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      client:\n        default: 2379\n  etcd_discovery:\n    name: etcd-discovery\n    hosts:\n      default: etcd-discovery\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      client:\n        default: 2380\n\nvolume:\n  enabled: false\n  class_name: general\n  size: 5Gi\n\njobs:\n  db_compact:\n    cron: \"1 */2 * * *\"\n    starting_deadline: 600\n    # Timeout have to be set the same format\n    # as it is for etcdctl 120s, 1m etc.\n    command_timeout: 120s\n    history:\n      success: 3\n      failed: 1\n\nmanifests:\n  configmap_bin: true\n  statefulset: true\n  job_image_repo_sync: true\n  secret_registry: true\n  service: true\n  service_discovery: true\n  cron_job_db_compact: false\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #      
   app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "fluentbit/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v0.14.2\ndescription: OpenStack-Helm Fluentbit\nname: fluentbit\nversion: 2025.2.0\nhome: https://www.fluentbit.io/\nsources:\n  - https://github.com/fluent/fluentbit\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit/\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "fluentbit/templates/bin/_fluent-bit.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nif [ -d \"/var/log/journal\" ]; then\n  export JOURNAL_PATH=\"/var/log/journal\"\nelse\n  export JOURNAL_PATH=\"/run/log/journal\"\nfi\n\nexec /fluent-bit/bin/fluent-bit -c /fluent-bit/etc/fluent-bit.conf\n"
  },
  {
    "path": "fluentbit/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: fluentbit-bin\ndata:\n  fluent-bit.sh: |\n{{ tuple \"bin/_fluent-bit.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "fluentbit/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: fluentbit-etc\ntype: Opaque\ndata:\n  fluent-bit.conf: {{ .Values.conf.fluentbit.template | b64enc }}\n  parsers.conf: {{ .Values.conf.parsers.template | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "fluentbit/templates/daemonset-fluent-bit.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.daemonset_fluentbit }}\n{{- $envAll := . }}\n\n{{- $mounts_fluentbit := .Values.pod.mounts.fluentbit.fluentbit }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"fluentbit\" }}\n{{ tuple $envAll \"fluentbit\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - namespaces\n      - nodes\n      - pods\n      - services\n      - replicationcontrollers\n      - limitranges\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - apps\n    resources:\n      - statefulsets\n      - daemonsets\n      - deployments\n      - replicasets\n    verbs:\n      - get\n      - list\n      - watch\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: fluentbit\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"fluentbit\" \"daemon\" | 
include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"fluentbit\" \"daemon\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"fluentbit\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"fluentbit\" \"daemon\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"fluentbit\" \"containerNames\" (list \"fluentbit\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"fluentbit\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.fluentbit.enabled }}\n{{ tuple $envAll \"fluentbit\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ else }}\n      nodeSelector:\n        {{ .Values.labels.fluentbit.node_selector_key }}: {{ .Values.labels.fluentbit.node_selector_value | quote }}\n{{ end }}\n      hostNetwork: true\n      hostPID: true\n      dnsPolicy: {{ .Values.pod.dns_policy }}\n      initContainers:\n{{ tuple $envAll \"fluentbit\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: fluentbit\n{{ tuple $envAll \"fluentbit\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.fluentbit | include 
\"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"fluentbit\" \"container\" \"fluentbit\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/fluent-bit.sh\n          env:\n            - name: FLUENTD_HOST\n              value: {{ tuple \"fluentd\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" | quote }}\n            - name: FLUENTD_PORT\n              value: {{ tuple \"fluentd\" \"internal\" \"service\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: fluentbit-bin\n              mountPath: /tmp/fluent-bit.sh\n              subPath: fluent-bit.sh\n              readOnly: true\n            - name: varlog\n              mountPath: /var/log\n              readOnly: true\n            - name: varlibdockercontainers\n              mountPath: /var/lib/docker/containers\n              readOnly: true\n            - name: fluentbit-etc\n              mountPath: /fluent-bit/etc/fluent-bit.conf\n              subPath: fluent-bit.conf\n              readOnly: true\n            - name: fluentbit-etc\n              mountPath: /fluent-bit/etc/parsers.conf\n              subPath: parsers.conf\n              readOnly: true\n{{ if $mounts_fluentbit.volumeMounts }}{{ toYaml $mounts_fluentbit.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: varlog\n          hostPath:\n            path: /var/log\n        - name: varlibdockercontainers\n          hostPath:\n            path: /var/lib/docker/containers\n        - name: fluentbit-bin\n          configMap:\n            name: fluentbit-bin\n            defaultMode: 0555\n        - name: fluentbit-etc\n          secret:\n            secretName: fluentbit-etc\n            
defaultMode: 0444\n{{ if $mounts_fluentbit.volumes }}{{ toYaml $mounts_fluentbit.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "fluentbit/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "fluentbit/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"fluentbit\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "fluentbit/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "fluentbit/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for fluentbit\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nrelease_group: null\n\nlabels:\n  fluentbit:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    fluentbit: docker.io/fluent/fluent-bit:0.14.2\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - fluentbit-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nconf:\n  fluentbit:\n    template: |\n      [SERVICE]\n          Daemon false\n          Flush 30\n          Log_Level info\n          Parsers_File parsers.conf\n\n      [INPUT]\n          Buffer_Chunk_Size 1M\n          Buffer_Max_Size 1M\n          Mem_Buf_Limit 5MB\n          Name tail\n          Path /var/log/kern.log\n          Tag kernel\n\n      [INPUT]\n          Buffer_Chunk_Size 1M\n          Buffer_Max_Size 1M\n          Mem_Buf_Limit 5MB\n          Name tail\n          Parser docker\n      
    Path /var/log/containers/*.log\n          Tag kube.*\n\n      [INPUT]\n          Buffer_Chunk_Size 1M\n          Buffer_Max_Size 1M\n          Mem_Buf_Limit 5MB\n          Name tail\n          Path /var/log/libvirt/libvirtd.log\n          Tag libvirt\n\n      [INPUT]\n          Buffer_Chunk_Size 1M\n          Buffer_Max_Size 1M\n          Mem_Buf_Limit 5MB\n          Name tail\n          Path /var/log/libvirt/qemu/*.log\n          Tag qemu\n\n      [INPUT]\n          Buffer_Chunk_Size 1M\n          Buffer_Max_Size 1M\n          Mem_Buf_Limit 5MB\n          Name systemd\n          Path ${JOURNAL_PATH}\n          Systemd_Filter _SYSTEMD_UNIT=kubelet.service\n          Tag journal.*\n\n      [INPUT]\n          Buffer_Chunk_Size 1M\n          Buffer_Max_Size 1M\n          Mem_Buf_Limit 5MB\n          Name systemd\n          Path ${JOURNAL_PATH}\n          Systemd_Filter _SYSTEMD_UNIT=docker.service\n          Tag journal.*\n\n      [FILTER]\n          Interval 1s\n          Match **\n          Name throttle\n          Rate 1000\n          Window 300\n\n      [FILTER]\n          Match libvirt\n          Name record_modifier\n          Record hostname ${HOSTNAME}\n\n      [FILTER]\n          Match qemu\n          Name record_modifier\n          Record hostname ${HOSTNAME}\n\n      [FILTER]\n          Match kernel\n          Name record_modifier\n          Record hostname ${HOSTNAME}\n\n      [FILTER]\n          Match journal.**\n          Name modify\n          Rename _BOOT_ID BOOT_ID\n          Rename _CAP_EFFECTIVE CAP_EFFECTIVE\n          Rename _CMDLINE CMDLINE\n          Rename _COMM COMM\n          Rename _EXE EXE\n          Rename _GID GID\n          Rename _HOSTNAME HOSTNAME\n          Rename _MACHINE_ID MACHINE_ID\n          Rename _PID PID\n          Rename _SYSTEMD_CGROUP SYSTEMD_CGROUP\n          Rename _SYSTEMD_SLICE SYSTEMD_SLICE\n          Rename _SYSTEMD_UNIT SYSTEMD_UNIT\n          Rename _TRANSPORT TRANSPORT\n          Rename _UID UID\n\n      
[OUTPUT]\n          Match **.fluentd**\n          Name null\n\n      [FILTER]\n          Match kube.*\n          Merge_JSON_Log true\n          Name kubernetes\n\n      [OUTPUT]\n          Host ${FLUENTD_HOST}\n          Match *\n          Name forward\n          Port ${FLUENTD_PORT}\n  parsers:\n    template: |\n      [PARSER]\n        Decode_Field_As escaped_utf8 log\n        Format json\n        Name docker\n        Time_Format %Y-%m-%dT%H:%M:%S.%L\n        Time_Keep true\n        Time_Key time\n\nsecrets:\n  oci_image_registry:\n    fluentbit: fluentbit-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      fluentbit:\n        username: fluentbit\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n\npod:\n  security_context:\n    fluentbit:\n      pod:\n        runAsUser: 65534\n      container:\n        fluentbit:\n          runAsUser: 0\n          readOnlyRootFilesystem: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n  dns_policy: \"ClusterFirstWithHostNet\"\n  lifecycle:\n    upgrades:\n      daemonsets:\n        
pod_replacement_strategy: RollingUpdate\n        fluentbit:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n  resources:\n    enabled: false\n    fluentbit:\n      limits:\n        memory: '400Mi'\n        cpu: '400m'\n      requests:\n        memory: '100Mi'\n        cpu: '100m'\n  tolerations:\n    fluentbit:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n      - key: node-role.kubernetes.io/node\n        operator: Exists\n  mounts:\n    fluentbit:\n      fluentbit:\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  daemonset_fluentbit: true\n  job_image_repo_sync: true\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "fluentd/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.10.1\ndescription: OpenStack-Helm Fluentd\nname: fluentd\nversion: 2025.2.0\nhome: https://www.fluentd.org/\nsources:\n  - https://github.com/fluent/fluentd\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit/\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "fluentd/templates/bin/_fluentd.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  chmod 1777 /tmp\n  exec fluentd -c /fluentd/etc/main.conf\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "fluentd/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"fluentd-bin\" | quote }}\ndata:\n  fluentd.sh: |\n{{ tuple \"bin/_fluentd.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "fluentd/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"fluentd_main\" }}\n{{- $path := .Values.conf.fluentd.path}}\n{{- range $name, $conf := .Values.conf.fluentd.conf }}\n{{ printf \"%s %s/%s.conf\" \"@include\" $path $name | indent 4}}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_etc }}\n{{ $envAll := .}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"fluentd-etc\" | quote }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\ntype: Opaque\nstringData:\n  main.conf: |\n{{- template \"fluentd_main\" . }}\ndata:\n{{- range $name, $config := .Values.conf.fluentd.conf }}\n{{- $filename := printf \"%s.conf\" $name}}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" $config \"key\" $filename \"format\" \"Secret\") | indent 2 }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "fluentd/templates/daemonset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"probeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"fluentd\" \"internal\" \"service\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset }}\n{{- $envAll := . }}\n\n{{- $config_path := .Values.conf.fluentd.path }}\n{{- $mounts_fluentd := .Values.pod.mounts.fluentd.fluentd }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.fluentd }}\n\n{{- $kafkaBroker := tuple \"kafka\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n{{- $kafkaBrokerPort := tuple \"kafka\" \"internal\" \"broker\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $kafkaBrokerURI := printf \"%s:%s\" $kafkaBroker $kafkaBrokerPort }}\n\n{{- $rcControllerName := printf \"%s-%s\" $envAll.Release.Name \"fluentd\"  }}\n{{ tuple $envAll \"fluentd\" $rcControllerName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $rcControllerName | quote }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $rcControllerName | quote }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $rcControllerName | quote }}\n  apiGroup: rbac.authorization.k8s.io\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $rcControllerName | quote }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - namespaces\n      - nodes\n      - pods\n      - services\n      - replicationcontrollers\n      - limitranges\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - apps\n    resources:\n      - statefulsets\n      - daemonsets\n      - deployments\n      - replicasets\n    verbs:\n      - get\n      - list\n      - watch\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: {{ $rcControllerName | quote }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"fluentd\" \"internal\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n{{ tuple $envAll \"fluentd\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"fluentd\" \"internal\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"fluentd\" \"internal\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | 
include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{- if .Values.monitoring.prometheus.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_pod_annotations\" | indent 8 }}\n{{- end }}\n{{ dict \"envAll\" $envAll \"podName\" \"fluentd\" \"containerNames\" (list \"fluentd\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"fluentd\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $rcControllerName | quote }}\n{{ if $envAll.Values.pod.tolerations.fluentd.enabled }}\n{{ tuple $envAll \"fluentd\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.fluentd.node_selector_key }}: {{ .Values.labels.fluentd.node_selector_value | quote }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.fluentd.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"fluentd\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: fluentd\n{{ tuple $envAll \"fluentd\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.fluentd | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"fluentd\" \"container\" \"fluentd\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/fluentd.sh\n            - start\n          ports:\n            - name: forward\n              containerPort: 
{{ tuple \"fluentd\" \"internal\" \"service\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            - name: metrics\n              containerPort: {{ tuple \"fluentd\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" . \"component\" \"fluentd\" \"container\" \"fluentd\" \"type\" \"readiness\" \"probeTemplate\" (include \"probeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"fluentd\" \"container\" \"fluentd\" \"type\" \"liveness\" \"probeTemplate\" (include \"probeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          env:\n            - name: NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: FLUENTD_PORT\n              value: {{ tuple \"fluentd\" \"internal\" \"service\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: ELASTICSEARCH_HOST\n              value: {{ tuple \"elasticsearch\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" | quote }}\n            - name: ELASTICSEARCH_PORT\n              value: {{ tuple \"elasticsearch\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: ELASTICSEARCH_SCHEME\n              value: {{ tuple \"elasticsearch\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | quote }}\n            - name: KAFKA_BROKER\n              value: {{ $kafkaBrokerURI }}\n{{- if .Values.pod.env.fluentd.vars }}\n{{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.pod.env.fluentd.vars | indent 12 }}\n{{- end }}\n{{- if .Values.pod.env.fluentd.secrets }}\n{{ tuple $envAll .Values.pod.env.fluentd.secrets | include \"helm-toolkit.utils.to_k8s_env_secret_vars\" | indent 12 }}\n{{- end }}\n            - name: ELASTICSEARCH_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ printf \"%s-%s\" $envAll.Release.Name \"elasticsearch-user\" | quote }}\n                  key: ELASTICSEARCH_USERNAME\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ printf \"%s-%s\" $envAll.Release.Name \"elasticsearch-user\" | quote }}\n                  key: ELASTICSEARCH_PASSWORD\n{{- if .Values.manifests.secret_kafka }}\n            - name: KAFKA_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ printf \"%s-%s\" $envAll.Release.Name \"kafka-user\" | quote }}\n                  key: KAFKA_USERNAME\n            - name: KAFKA_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ printf \"%s-%s\" $envAll.Release.Name \"kafka-user\" | quote }}\n                  key: KAFKA_PASSWORD\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: varlog\n              mountPath: /var/log\n            - name: varlibdockercontainers\n              mountPath: /var/lib/docker/containers\n              readOnly: true\n            - name: pod-etc-fluentd\n              mountPath: /fluentd/etc\n            - name: fluentd-etc\n              mountPath: {{ printf \"%s/%s.conf\" $config_path \"main\" }}\n              subPath: {{ printf \"%s.conf\" \"main\"}}\n   
           readOnly: true\n{{- range $name, $config := .Values.conf.fluentd.conf }}\n            - name: fluentd-etc\n              mountPath: {{ printf \"%s/%s.conf\" $config_path $name }}\n              subPath: {{ printf \"%s.conf\" $name }}\n              readOnly: true\n{{- end }}\n            - name: fluentd-bin\n              mountPath: /tmp/fluentd.sh\n              subPath: fluentd.sh\n              readOnly: true\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.elasticsearch.auth.admin.secret.tls.internal \"path\" \"/etc/elasticsearch/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_fluentd.volumeMounts }}{{ toYaml $mounts_fluentd.volumeMounts | indent 12 }}{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: varlog\n          hostPath:\n            path: /var/log\n        - name: varlibdockercontainers\n          hostPath:\n            path: /var/lib/docker/containers\n        - name: pod-etc-fluentd\n          emptyDir: {}\n{{ if and (.Values.manifests.secret_fluentd_env) (.Values.pod.env.fluentd.secrets) }}\n        - name: {{ printf \"%s-%s\" $envAll.Release.Name \"env-secret\" | quote }}\n          secret:\n            secretName: {{ printf \"%s-%s\" $envAll.Release.Name \"env-secret\" | quote }}\n            defaultMode: 0444\n{{- end }}\n        - name: fluentd-etc\n          secret:\n            secretName: {{ printf \"%s-%s\" $envAll.Release.Name \"fluentd-etc\" | quote }}\n            defaultMode: 0444\n        - name: fluentd-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"fluentd-bin\" | quote }}\n            defaultMode: 0555\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.elasticsearch.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_fluentd.volumes }}{{ toYaml 
$mounts_fluentd.volumes | indent 8 }}{{- end }}\n{{- end }}\n"
  },
  {
    "path": "fluentd/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "fluentd/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"fluentd\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "fluentd/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License. */}}\n\n{{- if .Values.manifests.network_policy -}}\n{{ $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"fluentd\" }}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "fluentd/templates/secret-elasticsearch-creds.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_elasticsearch }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"elasticsearch-user\" | quote }}\ntype: Opaque\ndata:\n  ELASTICSEARCH_USERNAME: {{ .Values.endpoints.elasticsearch.auth.admin.username | b64enc }}\n  ELASTICSEARCH_PASSWORD: {{ .Values.endpoints.elasticsearch.auth.admin.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "fluentd/templates/secret-fluentd.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and (.Values.manifests.secret_fluentd_env) (.Values.pod.env.fluentd.secrets) }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"env-secret\" | quote }}\ntype: Opaque\ndata:\n  {{ range $key, $value := .Values.pod.env.fluentd.secrets }}\n  {{$key | upper}}: {{ $value | b64enc }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "fluentd/templates/secret-kafka-creds.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_kafka }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"kafka-user\" | quote }}\ntype: Opaque\ndata:\n  KAFKA_USERNAME: {{ .Values.endpoints.kafka.auth.admin.username | b64enc }}\n  KAFKA_PASSWORD: {{ .Values.endpoints.kafka.auth.admin.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "fluentd/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "fluentd/templates/service-fluentd.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_fluentd }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"fluentd\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: forward\n    port: {{ tuple \"fluentd\" \"internal\" \"service\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.fluentd.node_port.enabled }}\n    nodePort: {{ .Values.network.fluentd.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"fluentd\" \"internal\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.fluentd.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "fluentd/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for fluentd.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nrelease_group: null\n\nlabels:\n  fluentd:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    fluentd: quay.io/airshipit/fluentd:latest-debian\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - fluentd-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    fluentd:\n      services: null\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nconf:\n  fluentd:\n    path: /fluentd/etc\n    conf:\n      input: |\n        <source>\n          bind 0.0.0.0\n          port \"#{ENV['FLUENTD_PORT']}\"\n          @type forward\n        </source>\n        <source>\n          <parse>\n            time_format %Y-%m-%dT%H:%M:%S.%NZ\n            @type json\n          </parse>\n          path /var/log/containers/*.log\n          read_from_head true\n 
         tag kubernetes.*\n          @type tail\n        </source>\n        <match **>\n          @type relabel\n          @label @output\n        </match>\n      output: |\n        <label @output>\n          <match **>\n            <buffer>\n              chunk_limit_size 512K\n              flush_interval 5s\n              flush_thread_count 8\n              queue_limit_length 32\n              retry_forever false\n              retry_max_interval 30\n            </buffer>\n            host \"#{ENV['ELASTICSEARCH_HOST']}\"\n            reload_connections false\n            reconnect_on_error true\n            reload_on_failure true\n            include_tag_key true\n            logstash_format true\n            password \"#{ENV['ELASTICSEARCH_PASSWORD']}\"\n            port \"#{ENV['ELASTICSEARCH_PORT']}\"\n            @type elasticsearch\n            user \"#{ENV['ELASTICSEARCH_USERNAME']}\"\n          </match>\n        </label>\n\nsecrets:\n  oci_image_registry:\n    fluentd: fluentd-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      fluentd:\n        username: fluentd\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  elasticsearch:\n    namespace: null\n    name: elasticsearch\n    auth:\n      admin:\n        username: admin\n        password: changeme\n        secret:\n          tls:\n            internal: elasticsearch-tls-api\n    hosts:\n      data: elasticsearch-data\n      default: elasticsearch-logging\n      discovery: 
elasticsearch-discovery\n      public: elasticsearch\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      http:\n        default: 80\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24231\n  kafka:\n    namespace: null\n    name: kafka\n    auth:\n      admin:\n        username: admin\n        password: changeme\n    hosts:\n      default: kafka-broker\n      public: kafka\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: kafka\n    port:\n      broker:\n        default: 9092\n        public: 80\n\nmonitoring:\n  prometheus:\n    enabled: true\n    fluentd:\n      scrape: true\n      port: 24231\n\nnetwork:\n  fluentd:\n    node_port:\n      enabled: false\n      port: 32329\n\nnetwork_policy:\n  fluentd:\n    ingress:\n      - {}\n    egress:\n      - {}\n\npod:\n  env:\n    fluentd:\n      vars: null\n      secrets: null\n  tolerations:\n    fluentd:\n      enabled: false\n  security_context:\n    fluentd:\n      pod:\n        runAsUser: 0\n      container:\n        fluentd:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  lifecycle:\n    upgrades:\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        fluentd:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n    termination_grace_period:\n      fluentd:\n        timeout: 30\n  resources:\n    enabled: false\n    fluentd:\n      limits:\n        memory: '1024Mi'\n        cpu: '2000m'\n      requests:\n        memory: '128Mi'\n        cpu: '500m'\n  mounts:\n    fluentd:\n      fluentd:\n  probes:\n    fluentd:\n      fluentd:\n        
readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 90\n            timeoutSeconds: 30\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 180\n            timeoutSeconds: 30\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  daemonset: true\n  job_image_repo_sync: true\n  network_policy: false\n  secret_elasticsearch: true\n  secret_fluentd_env: true\n  secret_kafka: false\n  secret_registry: true\n  service_fluentd: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "freezer/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nname: freezer\ndescription: OpenStack Freezer Backup and Restore Service platform\ntype: application\nversion: 2025.2.0\nappVersion: v1.0.0\nhome: https://docs.openstack.org/freezer/latest\nicon: https://www.openstack.org/software/images/mascots/freezer.png\nsources:\n  - https://opendev.org/openstack/freezer\n  - https://opendev.org/openstack/freezer-api\nkeywords:\n  - openstack\n  - backup\n  - restore\n  - helm\nmaintainers:\n  - name: OpenStack Helm Team\n    email: openstack-helm@lists.openstack.org\nannotations:\n  \"helm.sh/hook-weight\": \"-5\"\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "freezer/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "freezer/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nfreezer-manage --config-file /etc/freezer/freezer.conf db sync\n"
  },
  {
    "path": "freezer/templates/bin/_freezer-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec freezer-api --config-file /etc/freezer/freezer.conf\n}\nfunction stop () {\n  kill -TERM 1\n}\n$COMMAND\n"
  },
  {
    "path": "freezer/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: freezer-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  freezer-api.sh: |\n{{ tuple \"bin/_freezer-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "freezer/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.freezer.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.freezer.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n{{- if empty .Values.conf.freezer.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.freezer.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.freezer.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.freezer.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.freezer.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.freezer.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.freezer.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.freezer.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.freezer.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.freezer.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.freezer.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.freezer.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.freezer.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.freezer.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.freezer.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.freezer.keystone_authtoken \"username\" .Values.endpoints.identity.auth.freezer.username -}}\n{{- end -}}\n{{- if empty .Values.conf.freezer.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.freezer.keystone_authtoken \"password\" .Values.endpoints.identity.auth.freezer.password -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.freezer.database.connection)) (empty .Values.conf.freezer.database.connection) -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"freezer\" \"mysql\" . 
| include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | set .Values.conf.freezer.database \"connection\" -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .Release.Name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: freezer-etc\ntype: Opaque\ndata:\n  freezer.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.freezer | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.paste | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n{{- end }}\n...\n"
  },
  {
    "path": "freezer/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- define \"probeTemplate\" }}\n{{- $health_path := tuple \"backup\" \"healthcheck\" \"internal\" . | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" }}\nhttpGet:\n  scheme: {{ tuple \"backup\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: {{ $health_path }}\n  port: {{ tuple \"backup\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . 
}}\n\n{{- $mounts_freezer_api := .Values.pod.mounts.freezer_api.freezer_api }}\n{{- $mounts_freezer_api_init := .Values.pod.mounts.freezer_api.init_container }}\n\n{{- $serviceAccountName := \"freezer-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: freezer-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"freezer\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"freezer\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"freezer\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"freezer_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"freezer-api\" \"containerNames\" (list \"init\" \"freezer-api\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"freezer\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"freezer_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"freezer_api\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"freezer\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.freezer.enabled }}\n{{ tuple $envAll \"freezer\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_freezer_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: freezer-api\n{{ tuple $envAll \"freezer_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"freezer\" \"container\" \"freezer_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"freezer_api\" \"type\" \"readiness\" \"probeTemplate\" (include \"probeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"freezer_api\" \"type\" \"liveness\" \"probeTemplate\" (include \"probeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/freezer-api.sh\n            - start\n          env:\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/freezer/certs/ca.crt\"\n{{- 
end }}\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/freezer-api.sh\n                  - stop\n          ports:\n            - name: freezer-api\n              containerPort: {{ tuple \"backup\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.freezer.oslo_concurrency.lock_path }}\n            - name: freezer-bin\n              mountPath: /tmp/freezer-api.sh\n              subPath: freezer-api.sh\n              readOnly: true\n            - name: freezer-etc\n              mountPath: /etc/freezer/freezer.conf\n              subPath: freezer.conf\n              readOnly: true\n            - name: freezer-etc\n              mountPath: /etc/freezer/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: freezer-etc\n              mountPath: /etc/freezer/logging.conf\n              subPath: logging.conf\n              readOnly: true\n            - name: freezer-etc\n              mountPath: /etc/freezer/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{ if $mounts_freezer_api.volumeMounts }}{{ toYaml $mounts_freezer_api.volumeMounts | indent 12 }}{{ end }}\n{{- dict \"enabled\" .Values.tls.oslo_db \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" .Values.tls.identity \"name\" .Values.secrets.tls.backup.api.internal \"path\" \"/etc/freezer/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: freezer-bin\n          configMap:\n            name: freezer-bin\n            defaultMode: 0555\n        - name: freezer-etc\n          secret:\n            
secretName: freezer-etc\n            defaultMode: 0444\n{{- dict \"enabled\" .Values.tls.oslo_db \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" .Values.tls.identity \"name\" .Values.secrets.tls.backup.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_freezer_api.volumes }}{{ toYaml $mounts_freezer_api.volumes | indent 8 }}\n{{ end }}\n{{- end }}\n"
  },
  {
    "path": "freezer/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "freezer/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendServiceType\" \"backup\" \"backendPort\" \"f-api\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "freezer/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"freezer\" \"keystoneUser\" .Values.bootstrap.ks_user -}}\n{{- if .Values.pod.tolerations.freezer.enabled -}}\n{{- $_ := set $bootstrapJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "freezer/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"freezer\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbDropJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.freezer.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "freezer/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"freezer\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.freezer.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "freezer/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"freezer\" \"podVolMounts\" .Values.pod.mounts.freezer_db_sync.freezer_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.freezer_db_sync.freezer_db_sync.volumes -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbSyncJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.freezer.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "freezer/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"freezer\" \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.freezer.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "freezer/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksEndpointsJob := dict \"envAll\" . \"serviceName\" \"freezer\" \"serviceTypes\" ( tuple \"backup\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksEndpointsJob \"tlsSecret\" .Values.secrets.tls.backup.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksEndpointsJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.freezer.enabled -}}\n{{- $_ := set $ksEndpointsJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksEndpointsJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "freezer/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"freezer\" \"serviceTypes\" ( tuple \"backup\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.backup.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.freezer.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "freezer/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"freezer\" -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.backup.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.freezer.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "freezer/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: freezer-api\n  labels:\n{{ tuple $envAll \"freezer\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n{{- if .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n{{- else }}\n  maxUnavailable: {{ .Values.pod.lifecycle.disruption_budget.api.max_unavailable | default 1 }}\n{{- end }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"freezer\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n...\n"
  },
  {
    "path": "freezer/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"freezer\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "freezer/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"freezer\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "freezer/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"backup\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: f-api\n    port: {{ tuple \"backup\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n    nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"freezer\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "freezer/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"backup\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "freezer/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nrelease_uuid: null\n\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    freezer_db_sync: quay.io/airshipit/freezer-api:2025.1-ubuntu_jammy\n    freezer_api: quay.io/airshipit/freezer-api:2025.1-ubuntu_jammy\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 
5000\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n        interface: internal\n      freezer:\n        role: admin,service\n        region_name: RegionOne\n        username: freezer\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  backup:\n    name: freezer\n    hosts:\n      default: freezer-api\n      internal: freezer-api\n      public: freezer\n    host_fqdn_override:\n      default: null\n    path:\n      default: /\n      healthcheck: /healthcheck\n    scheme:\n      default: http\n    port:\n      api:\n        default: 9090\n        public: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n        secretNamespace: openstack\n      freezer:\n        username: freezer\n        password: password\n        secret: freezer-db-password\n        secretNamespace: openstack\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /freezer\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, will be set to a random value\n      # if not specified.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n\nsecrets:\n  
identity:\n    admin: freezer-keystone-admin\n    freezer: freezer-keystone-user\n  oslo_db:\n    admin: freezer-db-admin\n    freezer: freezer-db-user\n  tls:\n    backup:\n      api:\n        public: freezer-tls-public\n        internal: freezer-tls-internal\n        nginx: freezer-tls-nginx\n        nginx_cluster: freezer-tls-nginx-cluster\n\nbootstrap:\n  enabled: false\n  ks_user: freezer\n  script: \"\"\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30090\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - freezer-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - freezer-db-sync\n        - freezer-ks-user\n        - freezer-ks-endpoints\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    bootstrap:\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: backup\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - freezer-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_endpoints:\n      jobs:\n        - freezer-ks-service\n      services:\n        - endpoint: internal\n          
service: identity\n\npod:\n  probes:\n    rpc_timeout: 60\n    rpc_retries: 2\n    api:\n      freezer_api:\n        liveness:\n          enabled: True\n          params:\n            initialDelaySeconds: 60\n            periodSeconds: 10\n            timeoutSeconds: 5\n        readiness:\n          enabled: True\n          params:\n            initialDelaySeconds: 60\n            periodSeconds: 10\n            timeoutSeconds: 5\n  security_context:\n    freezer:\n      pod:\n        runAsUser: 42424\n      container:\n        freezer_api:\n          runAsUser: 0\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    freezer:\n      enabled: false\n      tolerations:\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule\n  disruption_budget:\n    api:\n      min_available: 0\n  replicas:\n    api: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n        max_unavailable: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          
cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  mounts:\n    freezer_api:\n      init_container: null\n      freezer_api:\n        volumeMounts:\n        volumes:\n    freezer_bootstrap:\n      init_container: null\n      freezer_bootstrap:\n        volumeMounts:\n        volumes:\n    freezer_db_sync:\n      init_container: null\n      freezer_db_sync:\n        volumeMounts:\n        volumes:\n\nconf:\n  freezer:\n    DEFAULT:\n      debug: true\n      log_config_append: /etc/freezer/logging.conf\n      bind_host: 0.0.0.0\n      bind_port: 9090\n    paste_deploy:\n      config_file: api-paste.ini\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. 
when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    oslo_policy:\n      policy_file: /etc/freezer/policy.yaml\n    oslo_concurrency:\n      lock_path: /var/lock\n    storage:\n      backend: sqlalchemy\n      driver: sqlalchemy\n    keystone_authtoken:\n      auth_version: v3\n      auth_type: password\n      # region_name: RegionOne\n      # project_domain_name: service\n      # project_name: service\n      # user_domain_name: service\n      # username: freezer\n      # password: password\n      # auth_url: http://keystone-api.openstack.svc.cluster.local:5000/v3\n      # auth_uri: http://keystone-api.openstack.svc.cluster.local:5000/v3\n\n  logging:\n    loggers:\n      keys:\n        - root\n        - freezer\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: \"null\"\n    logger_freezer:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: freezer\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      
datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n  paste:\n    app:api_versions:\n      paste.app_factory: freezer_api.api.versions:api_versions\n    app:appv1:\n      paste.app_factory: freezer_api.service:freezer_appv1_factory\n    app:appv2:\n      paste.app_factory: freezer_api.service:freezer_appv2_factory\n    filter:authtoken:\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n    filter:healthcheck:\n      paste.filter_factory: oslo_middleware:Healthcheck.factory\n      backends: disable_by_file\n      disable_by_file_path: /etc/freezer/healthcheck_disable\n    filter:context:\n      paste.filter_factory: freezer_api.api.common.middleware:ContextMiddleware.factory\n    filter:versionsNegotiator:\n      paste.filter_factory: freezer_api.api.versions:VersionNegotiator.factory\n    filter:http_proxy_to_wsgi:\n      paste.filter_factory: oslo_middleware:HTTPProxyToWSGI.factory\n    pipeline:main:\n      pipeline: healthcheck http_proxy_to_wsgi versionsNegotiator authtoken context backupapp\n    pipeline:unauthenticated_freezer_api:\n      pipeline: http_proxy_to_wsgi healthcheck freezer_app\n    composite:backupapp:\n      paste.composite_factory: freezer_api.service:root_app_factory\n      /: api_versions\n      /v1: appv1\n      /v2: appv2\n\ntls:\n  identity: false\n  oslo_db: false\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  deployment_api: true\n  ingress_api: true\n  job_bootstrap: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_image_repo_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  pdb_api: true\n  secret_db: true\n  secret_keystone: true\n  service_api: true\n  service_ingress_api: true\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  
#   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "glance/.helmignore",
    "content": "values_overrides\n"
  },
  {
    "path": "glance/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Glance\nname: glance\nversion: 2025.2.0\nhome: https://docs.openstack.org/glance/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Glance/OpenStack_Project_Glance_vertical.png\nsources:\n  - https://opendev.org/openstack/glance\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "glance/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncd /tmp/images\n\n{{ range .Values.bootstrap.structured.images }}\nopenstack image show {{ .name  | quote }} || \\\n  { curl --fail -sSL -O {{ .source_url }}{{ .image_file }}; \\\n  openstack image create {{ .name | quote }} \\\n  {{ if .id -}} --id {{ .id }} {{ end -}} \\\n  --min-disk {{ .min_disk }} \\\n  --disk-format {{ .image_type }} \\\n  --file {{ .image_file }} \\\n  {{ if .properties -}} {{ range $key, $value := .properties }}--property {{$key}}={{$value}} {{ end }}{{ end -}} \\\n  --container-format {{ .container_format | quote }} \\\n  {{ if .private -}}\n  --private\n  {{- else -}}\n  --public\n  {{- end -}}; }\n{{ end }}\n\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "glance/templates/bin/_ceph-admin-keyring.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncat > /etc/ceph/ceph.client.admin.keyring << EOF\n[client.admin]\n{{- if .Values.conf.ceph.admin_keyring }}\n    key = {{ .Values.conf.ceph.admin_keyring }}\n{{- else }}\n    key = $(cat /tmp/client-keyring)\n{{- end }}\nEOF\n\nexit 0\n"
  },
  {
    "path": "glance/templates/bin/_ceph-keyring.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncat > /etc/ceph/ceph.client.${RBD_STORE_USER}.keyring <<EOF\n[client.${RBD_STORE_USER}]\n    key = $(cat /tmp/client-keyring)\nEOF\n\nexit 0\n"
  },
  {
    "path": "glance/templates/bin/_clean-image.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ if .Values.bootstrap.enabled }}\n{{ range .Values.bootstrap.structured.images }}\n  {{ if .id -}}\n    openstack image show {{ .id | quote }} && \\\n      openstack image delete {{ .id | quote }}\n  {{ end -}}\n{{ end }}\n\n{{ end }}\nexit 0\n"
  },
  {
    "path": "glance/templates/bin/_clean-secrets.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec kubectl delete secret \\\n  --namespace ${NAMESPACE} \\\n  --ignore-not-found=true \\\n  ${RBD_POOL_SECRET}\n"
  },
  {
    "path": "glance/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nglance-manage db_sync\n"
  },
  {
    "path": "glance/templates/bin/_glance-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec uwsgi --ini /etc/glance/glance-api-uwsgi.ini\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "glance/templates/bin/_iscsiadm.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2020 The Openstack-Helm Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nchroot /mnt/host-rootfs /usr/bin/env -i PATH=\"/sbin:/bin:/usr/bin\" \\\n       iscsiadm \"${@:1}\"\n"
  },
  {
    "path": "glance/templates/bin/_metadefs-load.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n\nglance-manage \\\n    --config-file /etc/glance/glance-api.conf \\\n    --config-dir /etc/glance/glance.conf.d \\\n    db_load_metadefs /var/lib/openstack/etc/glance/metadefs\n"
  },
  {
    "path": "glance/templates/bin/_multipath.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nchroot /mnt/host-rootfs /usr/bin/env -i PATH=\"/sbin:/bin:/usr/bin\" \\\n      multipath \"${@:1}\"\n"
  },
  {
    "path": "glance/templates/bin/_multipathd.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nchroot /mnt/host-rootfs /usr/bin/env -i PATH=\"/sbin:/bin:/usr/bin\" \\\n      multipathd \"${@:1}\"\n"
  },
  {
    "path": "glance/templates/bin/_nginx.sh.tpl",
    "content": "#!/bin/sh\nset -xe\n\nCOMMAND=\"${@:-start}\"\n\nstart () {\n  envsubst < /etc/nginx/nginx.conf > /tmp/nginx.conf\n  cat /tmp/nginx.conf\n  nginx -t -c /tmp/nginx.conf\n  exec nginx -c /tmp/nginx.conf\n}\n\nstop () {\n  nginx -s stop\n}\n\n$COMMAND\n"
  },
  {
    "path": "glance/templates/bin/_storage-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\nif [ \"x$STORAGE_BACKEND\" == \"xrbd\" ]; then\n  SECRET=$(mktemp --suffix .yaml)\n  KEYRING=$(mktemp --suffix .keyring)\n  function cleanup {\n      rm -f \"${SECRET}\" \"${KEYRING}\"\n  }\n  trap cleanup EXIT\nfi\n\nSCHEME={{ tuple \"object_store\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\nif [[ \"$SCHEME\" == \"https\" && -f /etc/ssl/certs/openstack-helm.crt ]]; then\n  export CURL_CA_BUNDLE=\"/etc/ssl/certs/openstack-helm.crt\"\nfi\n\nset -ex\nif [ \"x$STORAGE_BACKEND\" == \"xpvc\" ]; then\n  echo \"No action required.\"\nelif [ \"x$STORAGE_BACKEND\" == \"xswift\" ]; then\n  : ${OS_INTERFACE:=\"internal\"}\n  OS_TOKEN=\"$(openstack token issue -f value -c id)\"\n  OS_PROJECT_ID=\"$(openstack project show service -f value -c id)\"\n  OS_SWIFT_ENDPOINT_PREFIX=\"$(openstack endpoint list --service swift --interface ${OS_INTERFACE} --region ${OS_REGION_NAME} -f value -c URL | awk -F '$' '{ print $1 }')\"\n  OS_SWIFT_SCOPED_ENDPOINT=\"${OS_SWIFT_ENDPOINT_PREFIX}${OS_PROJECT_ID}\"\n  curl --fail -i -X POST \"${OS_SWIFT_SCOPED_ENDPOINT}\" \\\n    -H \"X-Auth-Token: ${OS_TOKEN}\" \\\n    -H \"X-Account-Meta-Temp-URL-Key: ${SWIFT_TMPURL_KEY}\"\nelif [ \"x$STORAGE_BACKEND\" == \"xrbd\" ]; then\n  ceph -s\n  function ensure_pool () {\n    ceph osd pool stats \"$1\" || ceph osd pool create \"$1\" \"$2\"\n    local test_version\n    if [[ 
$(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then\n        ceph osd pool application enable $1 $3\n    fi\n    ceph osd pool set \"$1\" size \"${RBD_POOL_REPLICATION}\" --yes-i-really-mean-it\n    ceph osd pool set \"$1\" crush_rule \"${RBD_POOL_CRUSH_RULE}\"\n  }\n  ensure_pool \"${RBD_POOL_NAME}\" \"${RBD_POOL_CHUNK_SIZE}\" \"${RBD_POOL_APP_NAME}\"\n\n  if USERINFO=$(ceph auth get \"client.${RBD_POOL_USER}\"); then\n    echo \"Cephx user client.${RBD_POOL_USER} already exist.\"\n    echo \"Update its cephx caps\"\n    ceph auth caps client.${RBD_POOL_USER} \\\n      mon \"profile rbd\" \\\n      osd \"profile rbd pool=${RBD_POOL_NAME}\"\n    ceph auth get client.${RBD_POOL_USER} -o ${KEYRING}\n  else\n    #NOTE(JCL): Restrict Glance user to only what is needed. MON Read only and RBD access to the Glance Pool\n    ceph auth get-or-create \"client.${RBD_POOL_USER}\" \\\n      mon \"profile rbd\" \\\n      osd \"profile rbd pool=${RBD_POOL_NAME}\" \\\n      -o \"${KEYRING}\"\n  fi\n\n  ENCODED_KEYRING=$(sed -n 's/^[[:blank:]]*key[[:blank:]]\\+=[[:blank:]]\\(.*\\)/\\1/p' \"${KEYRING}\" | base64 -w0)\n  cat > \"${SECRET}\" <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: \"${RBD_POOL_SECRET}\"\ntype: kubernetes.io/rbd\ndata:\n  key: \"${ENCODED_KEYRING}\"\nEOF\n  kubectl apply --namespace \"${NAMESPACE}\" -f \"${SECRET}\"\nelif [ \"x${STORAGE_BACKEND}\" == \"xradosgw\" ]; then\n  radosgw-admin user stats --uid=\"${RADOSGW_USERNAME}\" || \\\n    radosgw-admin user create \\\n      --uid=\"${RADOSGW_USERNAME}\" \\\n      --display-name=\"${RADOSGW_USERNAME} user\"\n\n  radosgw-admin subuser create \\\n    --uid=\"${RADOSGW_USERNAME}\" \\\n    --subuser=\"${RADOSGW_USERNAME}:swift\" \\\n    --access=full\n\n  radosgw-admin key create \\\n    --subuser=\"${RADOSGW_USERNAME}:swift\" \\\n    --key-type=swift \\\n    --secret=\"${RADOSGW_PASSWORD}\"\n\n  radosgw-admin user modify \\\n    --uid=\"${RADOSGW_USERNAME}\" \\\n    
--temp-url-key=\"${RADOSGW_TMPURL_KEY}\"\nfi\n"
  },
  {
    "path": "glance/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.certificates -}}\n{{  dict \"envAll\" . \"service\" \"image\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "glance/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $rallyTests := .Values.conf.rally_tests }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: glance-bin\ndata:\n{{- if eq .Values.storage \"cinder\" }}\n  iscsiadm: |\n{{ tuple \"bin/_iscsiadm.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  multipath: |\n{{ tuple \"bin/_multipath.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  multipathd: |\n{{ tuple \"bin/_multipathd.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  rally-test.sh: |\n{{ tuple $rallyTests | include \"helm-toolkit.scripts.rally_test\" | indent 4 }}\n  storage-init.sh: |\n{{ tuple \"bin/_storage-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  metadefs-load.sh: |\n{{ tuple \"bin/_metadefs-load.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . 
| indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  glance-api.sh: |\n{{ tuple \"bin/_glance-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ceph-keyring.sh: |\n{{ tuple \"bin/_ceph-keyring.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ceph-admin-keyring.sh: |\n{{ tuple \"bin/_ceph-admin-keyring.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  clean-image.sh: |\n{{ tuple \"bin/_clean-image.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  clean-secrets.sh: |\n{{ tuple \"bin/_clean-secrets.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n{{- if .Values.manifests.certificates }}\n  nginx.sh: |\n{{ tuple \"bin/_nginx.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.glance.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.glance.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.glance.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.glance.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.glance.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.glance.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.glance.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.glance.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.glance.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.glance.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.glance.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.glance.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.glance.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.glance.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.glance.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.glance.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.glance.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.glance.keystone_authtoken \"username\" .Values.endpoints.identity.auth.glance.username -}}\n{{- end -}}\n{{- if empty .Values.conf.glance.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.glance.keystone_authtoken \"password\" .Values.endpoints.identity.auth.glance.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.glance.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.glance.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.glance.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.glance.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.glance.database.connection)) (empty .Values.conf.glance.database.connection) -}}\n{{- $connection := tuple \"oslo_db\" \"internal\" \"glance\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.glance.database \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.glance.database \"connection\" $connection -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.glance.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"glance\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.glance.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.glance.DEFAULT.public_endpoint -}}\n{{- $_ := tuple \"image\" \"public\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.glance.DEFAULT \"public_endpoint\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.glance.DEFAULT.enabled_backends -}}\n{{- if eq .Values.storage \"rbd\" }}\n{{- $_ := \"file:file,http:http,rbd:rbd\" | set .Values.conf.glance.DEFAULT \"enabled_backends\" -}}\n{{- else if or (eq .Values.storage \"radosgw\") (eq .Values.storage \"swift\") }}\n{{- $_ := \"file:file,http:http,swift:swift\" | set .Values.conf.glance.DEFAULT \"enabled_backends\" -}}\n{{- else -}}\n{{/* pvc, local (hostPath) or other/ephemeral (emptyDir) */}}\n{{- $_ := \"file:file,http:http\" | set .Values.conf.glance.DEFAULT \"enabled_backends\" -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.glance.glance_store.default_backend -}}\n{{- if eq .Values.storage \"rbd\" }}\n{{- $_ := \"rbd\" | set .Values.conf.glance.glance_store \"default_backend\" -}}\n{{- else if or (eq .Values.storage \"radosgw\") (eq .Values.storage \"swift\") }}\n{{- $_ := \"swift\" | set .Values.conf.glance.glance_store \"default_backend\" -}}\n{{- else -}}\n{{/* pvc, local (hostPath) or other/ephemeral (emptyDir) */}}\n{{- $_ := \"file\" | set .Values.conf.glance.glance_store \"default_backend\" -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.glance.DEFAULT.bind_port -}}\n{{- $_ := tuple \"image\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.glance.DEFAULT \"bind_port\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.glance_api_uwsgi.uwsgi.processes -}}\n{{- $_ := set .Values.conf.glance_api_uwsgi.uwsgi \"processes\" .Values.conf.glance.DEFAULT.workers -}}\n{{- end -}}\n{{- if empty (index .Values.conf.glance_api_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"image\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.glance_api_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .deployment_name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n\n{{- if empty .Values.conf.glance.cors.allowed_origin -}}\n{{- $endpointScheme := tuple \"dashboard\" \"public\" \"web\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\n{{- $endpointHost := tuple \"dashboard\" \"public\" . | include \"helm-toolkit.endpoints.endpoint_host_lookup\" }}\n{{- $endpointPort := tuple \"dashboard\" \"public\" \"web\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{- if eq $endpointPort \"80\" \"443\" -}}\n        {{/*\n          Common browsers don't add default ports like 80 and 443 to the headers\n          and URLs therefore CORS should allow to use URLs both with 80,443 and\n          without it in the URL.\n        */}}\n        {{- $_ := set $envAll.Values.conf.glance.cors \"allowed_origin\" ( list ) }}\n        {{- $__allowed_origin := append $envAll.Values.conf.glance.cors.allowed_origin (printf \"%s://%s\" $endpointScheme $endpointHost) }}\n        {{- $_ := set $envAll.Values.conf.glance.cors \"allowed_origin\" $__allowed_origin }}\n        {{- $__allowed_origin := append $envAll.Values.conf.glance.cors.allowed_origin (printf \"%s://%s:%s\" $endpointScheme $endpointHost $endpointPort) }}\n        {{- $_ := set $envAll.Values.conf.glance.cors \"allowed_origin\" $__allowed_origin }}\n    {{- else }}\n        {{- printf \"%s://%s:%s\" $endpointScheme $endpointHost $endpointPort | set .Values.conf.glance.cors \"allowed_origin\" }}\n    {{- end }}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: glance-etc\ntype: Opaque\ndata:\n  rally_tests.yaml: {{ toYaml .Values.conf.rally_tests.tests | b64enc }}\n  glance-api.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.glance | b64enc }}\n  glance-api-uwsgi.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.glance_api_uwsgi | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  glance-api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.paste | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n  api_audit_map.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.api_audit_map | b64enc }}\n  glance_sudoers: {{ $envAll.Values.conf.glance_sudoers | b64enc }}\n  rootwrap.conf: {{ $envAll.Values.conf.rootwrap | b64enc }}\n{{- range $key, $value := 
$envAll.Values.conf.rootwrap_filters }}\n{{- $filePrefix := replace \"_\" \"-\"  $key }}\n  {{ printf \"%s.filters\" $filePrefix }}: {{ $value.content | b64enc }}\n{{- end }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" ( dict \"envAll\" $envAll \"template\" .Values.conf.swift_store \"key\" \"swift-store.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" ( dict \"envAll\" $envAll \"template\" .Values.conf.nginx \"key\" \"nginx.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"probeTemplate\" }}\n{{- $health_path := tuple \"image\" \"healthcheck\" \"internal\" . | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" }}\n{{- $health_port := tuple \"image\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- if .Values.manifests.certificates }}\nexec:\n    command:\n    - python\n    - -c\n    - \"import requests; r = requests.get('http://127.0.0.1:{{ $health_port }}{{ $health_path }}'); r.raise_for_status()\"\n{{- else }}\nhttpGet:\n  path: {{ $health_path }}\n  scheme: {{ tuple \"image\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  port: {{ $health_port }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . 
}}\n\n{{- $mounts_glance_api := .Values.pod.mounts.glance_api.glance_api }}\n{{- $mounts_glance_api_init := .Values.pod.mounts.glance_api.init_container }}\n\n{{- $serviceAccountName := \"glance-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.glance_api }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: glance-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"glance\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"glance\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"glance\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"glance_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"glance-api\" \"containerNames\" ( list \"glance-perms\" \"glance-api\" \"init\" \"nginx\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"glance_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"glance_api\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"glance\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"glance\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ if $envAll.Values.pod.tolerations.glance.enabled }}\n{{ tuple $envAll \"glance\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n{{- if .Values.pod.useHostNetwork.api }}\n      hostNetwork: true\n      dnsPolicy: ClusterFirstWithHostNet\n{{- end }}\n{{- if eq .Values.storage \"cinder\" }}\n      hostIPC: true\n{{- end }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_glance_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: glance-perms\n{{ tuple $envAll \"glance_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"glance\" \"container\" \"glance_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - \"glance:\"\n            - {{ .Values.conf.glance.file.filesystem_store_datadir }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: glance-images\n              mountPath: {{ .Values.conf.glance.file.filesystem_store_datadir }}\n        {{ if eq .Values.storage \"rbd\" }}\n        - 
name: ceph-keyring-placement\n{{ tuple $envAll \"glance_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"glance\" \"container\" \"ceph_keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: RBD_STORE_USER\n              value: {{ .Values.conf.glance.rbd.rbd_store_user | quote }}\n          command:\n            - /tmp/ceph-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: glance-bin\n              mountPath: /tmp/ceph-keyring.sh\n              subPath: ceph-keyring.sh\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n        {{ end }}\n      containers:\n        {{- if $envAll.Values.manifests.certificates }}\n        - name: nginx\n{{ tuple $envAll \"nginx\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.nginx | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"glance\" \"container\" \"nginx\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          ports:\n            - name: g-api\n              containerPort: {{ tuple \"image\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          env:\n            - name: PORT\n              value: {{ tuple \"image\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: POD_IP\n              valueFrom:\n                fieldRef:\n                  fieldPath: status.podIP\n            - name: SHORTNAME\n              value: {{ tuple \"image\" \"internal\" .  
| include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" | quote }}\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"image\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 30\n          command:\n            - /tmp/nginx.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/nginx.sh\n                  - stop\n          volumeMounts:\n            - name: glance-bin\n              mountPath: /tmp/nginx.sh\n              subPath: nginx.sh\n              readOnly: true\n            - name: glance-etc\n              mountPath: /etc/nginx/nginx.conf\n              subPath: nginx.conf\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.image.api.internal \"path\" \"/etc/nginx/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n        {{- end }}\n        - name: glance-api\n{{ tuple $envAll \"glance_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"glance\" \"container\" \"glance_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"glance-api\" \"type\" \"readiness\" \"probeTemplate\" (include \"probeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"glance-api\" \"type\" \"liveness\" \"probeTemplate\" (include \"probeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/glance-api.sh\n            - start\n{{- 
if or .Values.manifests.certificates .Values.tls.identity }}\n          env:\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/glance/certs/ca.crt\"\n{{- end }}\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/glance-api.sh\n                  - stop\n          {{- if not $envAll.Values.manifests.certificates }}\n          ports:\n            - name: g-api\n              containerPort: {{ tuple \"image\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          {{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.glance.oslo_concurrency.lock_path }}\n            - name: glance-tmp\n              mountPath: /var/lib/glance/tmp\n            - name: etcglance\n              mountPath: /etc/glance\n            - name: glance-bin\n              mountPath: /tmp/glance-api.sh\n              subPath: glance-api.sh\n              readOnly: true\n            - name: glance-etc\n              mountPath: /etc/glance/glance-api.conf\n              subPath: glance-api.conf\n              readOnly: true\n            - name: glance-etc-snippets\n              mountPath: /etc/glance/glance.conf.d/\n              readOnly: true\n            - name: glance-etc\n              mountPath: /etc/glance/glance-api-uwsgi.ini\n              subPath: glance-api-uwsgi.ini\n              readOnly: true\n            {{- if .Values.conf.glance.DEFAULT.log_config_append }}\n            - name: glance-etc\n              mountPath: {{ .Values.conf.glance.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.glance.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: glance-etc\n              mountPath: /etc/glance/glance-api-paste.ini\n              subPath: glance-api-paste.ini\n            
  readOnly: true\n            - name: glance-etc\n              mountPath: /etc/glance/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: glance-etc\n              mountPath: /etc/glance/api_audit_map.conf\n              subPath: api_audit_map.conf\n              readOnly: true\n            - name: glance-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_glance_sudoers\n              subPath: glance_sudoers\n              readOnly: true\n            - name: glance-etc\n              mountPath: /etc/glance/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"api\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/glance/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: glance-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            - name: glance-etc\n              mountPath: {{ .Values.conf.glance.swift.swift_store_config_file }}\n              subPath: swift-store.conf\n              readOnly: true\n            - name: glance-images\n              mountPath: {{ .Values.conf.glance.file.filesystem_store_datadir }}\n{{- if eq .Values.storage \"cinder\" }}\n            - name: host-rootfs\n              mountPath: /mnt/host-rootfs\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: HostToContainer\n              {{- end }}\n            - name: host-dev\n              
mountPath: /dev\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: HostToContainer\n              {{- end }}\n            - name: runlock\n              mountPath: /run/lock\n            - name: etciscsi\n              mountPath: /etc/iscsi\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: HostToContainer\n              {{- end }}\n            - name: usrlocalsbin\n              mountPath: /usr/local/sbin\n            - name: glance-bin\n              mountPath: /usr/local/sbin/iscsiadm\n              subPath: iscsiadm\n            - name: glance-bin\n              mountPath: /usr/local/sbin/multipath\n              subPath: multipath\n            - name: glance-bin\n              mountPath: /usr/local/sbin/multipathd\n              subPath: multipathd\n            - name: etcmultipath\n              mountPath: /etc/multipath\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: Bidirectional\n              {{- end }}\n            - name: sys\n              mountPath: /sys\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: HostToContainer\n              {{- end }}\n{{- end }}\n{{- if eq .Values.storage \"rbd\" }}\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n{{- end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" 
$envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" $envAll.Values.secrets.tls.image.api.internal \"path\" \"/etc/glance/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_glance_api.volumeMounts }}{{ toYaml $mounts_glance_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: glance-tmp\n          emptyDir: {}\n        - name: etcglance\n          emptyDir: {}\n        - name: glance-bin\n          configMap:\n            name: glance-bin\n            defaultMode: 0555\n        - name: glance-etc\n          secret:\n            secretName: glance-etc\n            defaultMode: 0444\n        - name: glance-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- if eq .Values.storage \"pvc\" }}\n        - name: glance-images\n          persistentVolumeClaim:\n            claimName: glance-images\n{{ else if eq .Values.storage \"local\" }}\n        - name: glance-images\n          hostPath:\n            path: {{ .Values.conf.glance.file.filesystem_store_datadir }}\n            type: DirectoryOrCreate\n{{- else }}\n        - name: glance-images\n          emptyDir: {}\n{{- end }}\n{{- if eq .Values.storage \"rbd\" }}\n        - name: etcceph\n          emptyDir: {}\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap 
}}\n            defaultMode: 0444\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.secrets.rbd | quote }}\n{{- end }}\n{{- if eq .Values.storage \"cinder\" }}\n        - name: host-rootfs\n          hostPath:\n            path: /\n        - name: host-dev\n          hostPath:\n            path: /dev\n        - name: runlock\n          hostPath:\n            path: /run/lock\n        - name: etciscsi\n          hostPath:\n            path: /etc/iscsi\n        - name: usrlocalsbin\n          emptyDir: {}\n        - name: etcmultipath\n          hostPath:\n            path: /etc/multipath\n        - name: sys\n          hostPath:\n            path: /sys\n{{- end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" $envAll.Values.secrets.tls.image.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_glance_api.volumes }}{{ toYaml $mounts_glance_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "glance/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendServiceType\" \"image\" \"backendPort\" \"g-api\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.image.api.internal -}}\n{{- if and .Values.manifests.certificates $secretName -}}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.image.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.bootstrap\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"5\"\n{{- end }}\n\n{{- define \"glance.templates._job_bootstrap.pod_volumes\" -}}\n{{- $envAll := index . 0 -}}\nvolumeMounts:\n  - name: imagedir\n    mountPath: /tmp/images\nvolumes:\n  - name: imagedir\n    emptyDir: {}\n{{- end }}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $podVolumes := tuple . | include \"glance.templates._job_bootstrap.pod_volumes\" | toString | fromYaml }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"glance\" \"keystoneUser\" .Values.bootstrap.ks_user \"logConfigFile\" .Values.conf.glance.DEFAULT.log_config_append \"podVolMounts\" $podVolumes.volumeMounts \"podVols\" $podVolumes.volumes -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $bootstrapJob \"tlsSecret\" .Values.secrets.tls.image.api.internal -}}\n{{- end -}}\n{{- $_ := set $bootstrapJob \"jobAnnotations\" (include \"metadata.annotations.job.bootstrap\" . 
| fromYaml) }}\n{{- if .Values.pod.tolerations.glance.enabled -}}\n{{- $_ := set $bootstrapJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n# The configFile path should be /etc/glance/glance-api.conf\n# not default /etc/glance/glance.conf defined by helm-toolkit,\n# since secrets mounted in '/etc/glance' have glance-api.conf not glance.conf in it.\n# The wrong path '/etc/glance/glance.conf' would be a dir in bootstrap container,\n# and lead to all config files in '/etc/glance' dir unreachable.\n{{- $_ := set $bootstrapJob \"configFile\" \"/etc/glance/glance-api.conf\" -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/job-clean.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_clean }}\n{{- $envAll := . }}\n{{- if .Values.bootstrap.enabled }}\n\n{{- $serviceAccountName := print \"glance-clean\" }}\n{{ tuple $envAll \"clean\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - delete\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ print \"glance-clean\" }}\n  labels:\n{{ tuple $envAll \"glance\" \"clean\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": pre-delete\n    \"helm.sh/hook-delete-policy\": hook-succeeded\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 -}}\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"glance\" \"clean\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"clean\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.glance.enabled }}\n{{ tuple $envAll \"glance\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"clean\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        {{- if eq .Values.storage \"rbd\" }}\n        - name: glance-secret-clean\n{{ tuple $envAll \"glance_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"clean\" \"container\" \"glance_secret_clean\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: RBD_POOL_SECRET\n              value: {{ .Values.secrets.rbd | quote }}\n          command:\n            - /tmp/clean-secrets.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: glance-bin\n              mountPath: /tmp/clean-secrets.sh\n              subPath: clean-secrets.sh\n              readOnly: true\n        {{ end }}\n        - name: glance-image-clean\n{{ tuple $envAll \"bootstrap\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include 
\"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          command:\n            - /tmp/clean-image.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: glance-bin\n              mountPath: /tmp/clean-image.sh\n              subPath: clean-image.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: glance-bin\n          configMap:\n            name: glance-bin\n            defaultMode: 0555\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $serviceName := \"glance\" -}}\n{{- $dbToDrop := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName \"glance-api\" ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"database\" \"configDbKey\" \"connection\" -}}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" $serviceName \"dbToDrop\" $dbToDrop -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbDropJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.glance.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $serviceName := \"glance\" -}}\n{{- $dbToInit := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName \"glance-api\" ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"database\" \"configDbKey\" \"connection\" -}}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" $serviceName \"dbToInit\" $dbToInit -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.glance.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbToSync := index . \"dbToSync\" | default ( dict \"configFile\" (printf \"/etc/%s/%s.conf\" \"glance\" \"glance-api\" ) \"configDir\" (printf \"/etc/%s/%s.conf.d\" \"glance\" \"glance\") \"logConfigFile\" (printf \"/etc/%s/logging.conf\" \"glance\" ) \"image\" ( index .Values.images.tags ( printf \"%s_db_sync\" \"glance\" )) ) -}}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"glance\" \"podVolMounts\" .Values.pod.mounts.glance_db_sync.glance_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.glance_db_sync.glance_db_sync.volumes \"dbToSync\" $dbToSync -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbSyncJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.glance.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"glance\" -}}\n{{- $_ := set $imageRepoSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.glance.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"glance\" \"serviceTypes\" ( tuple \"image\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.image.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.glance.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"glance\" \"serviceTypes\" ( tuple \"image\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.image.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.glance.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"glance\" -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.image.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.glance.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/job-metadefs-load.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_metadefs_load }}\n{{- $envAll := . }}\n\n{{- $etcSources := .Values.pod.etcSources.glance_metadefs_load }}\n\n{{- $serviceAccountName := \"glance-metadefs-load\" }}\n{{ tuple $envAll \"metadefs_load\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: glance-metadefs-load\n  labels:\n{{ tuple $envAll \"glance\" \"metadefs-load\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    helm.sh/hook: post-install,post-upgrade\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 }}\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"glance\" \"metadefs_load\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"glance-metadefs-load\" \"containerNames\" (list \"init\" \"glance-metadefs-load\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"metadefs_load\" | include 
\"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.glance.enabled }}\n{{ tuple $envAll \"glance\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"metadefs_load\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: glance-metadefs-load\n{{ tuple $envAll \"glance_metadefs_load\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.metadefs_load | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"metadefs_load\" \"container\" \"glance_metadefs_load\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n          command:\n            - /tmp/metadefs-load.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: glance-bin\n              mountPath: /tmp/metadefs-load.sh\n              subPath: metadefs-load.sh\n              readOnly: true\n            - name: etcglance\n              mountPath: /etc/glance\n            - name: glance-etc\n              mountPath: /etc/glance/glance-api.conf\n              subPath: glance-api.conf\n              readOnly: true\n            - name: glance-etc-snippets\n              mountPath: /etc/glance/glance.conf.d/\n              readOnly: true\n            {{- if .Values.conf.glance.DEFAULT.log_config_append }}\n            - name: glance-etc\n              mountPath: {{ .Values.conf.glance.DEFAULT.log_config_append }}\n 
             subPath: {{ base .Values.conf.glance.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: glance-bin\n          configMap:\n            name: glance-bin\n            defaultMode: 0555\n        - name: etcglance\n          emptyDir: {}\n        - name: glance-etc\n          secret:\n            secretName: glance-etc\n            defaultMode: 0444\n        - name: glance-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"glance\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $rmqUserJob \"tlsSecret\" .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $rmqUserJob \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.glance.enabled -}}\n{{- $_ := set $rmqUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/job-storage-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_storage_init }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"glance-storage-init\" }}\n{{ tuple $envAll \"storage_init\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: glance-storage-init\n  labels:\n{{ tuple $envAll \"glance\" \"storage-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    helm.sh/hook: post-install,post-upgrade\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 }}\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"glance\" \"storage-init\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"glance-storage-init\" \"containerNames\" (list \"init\" \"glance-storage-init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"storage_init\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.glance.enabled }}\n{{ tuple $envAll \"glance\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"storage_init\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        {{ if or (eq .Values.storage \"rbd\") (eq .Values.storage \"radosgw\") }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"glance_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"storage_init\" \"container\" \"ceph_keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/ceph-admin-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: glance-bin\n              mountPath: /tmp/ceph-admin-keyring.sh\n              subPath: ceph-admin-keyring.sh\n              readOnly: true\n            {{- if empty .Values.conf.ceph.admin_keyring }}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              
subPath: key\n              readOnly: true\n            {{ end }}\n        {{ end }}\n      containers:\n        - name: glance-storage-init\n{{ tuple $envAll \"glance_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.storage_init | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"storage_init\" \"container\" \"glance_storage_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: STORAGE_BACKEND\n              value: {{ .Values.storage | quote }}\n            {{- if eq .Values.storage \"rbd\" }}\n            - name: RBD_POOL_NAME\n              value: {{ .Values.conf.glance.rbd.rbd_store_pool | quote }}\n            - name: RBD_POOL_APP_NAME\n              value: {{ .Values.conf.software.rbd.rbd_store_pool_app_name | quote }}\n            - name: RBD_POOL_USER\n              value: {{ .Values.conf.glance.rbd.rbd_store_user | quote }}\n            - name: RBD_POOL_REPLICATION\n              value: {{ .Values.conf.glance.rbd.rbd_store_replication | quote }}\n            - name: RBD_POOL_CRUSH_RULE\n              value: {{ .Values.conf.glance.rbd.rbd_store_crush_rule | quote }}\n            - name: RBD_POOL_CHUNK_SIZE\n              value: {{ .Values.conf.glance.rbd.rbd_store_chunk_size | quote }}\n            - name: RBD_POOL_SECRET\n              value: {{ .Values.secrets.rbd | quote }}\n            {{ end }}\n            {{- if eq .Values.storage \"radosgw\" }}\n            - name: RADOSGW_USERNAME\n              value: {{ .Values.endpoints.ceph_object_store.auth.glance.username | quote }}\n            - name: RADOSGW_PASSWORD\n              value: {{ .Values.endpoints.ceph_object_store.auth.glance.password | quote }}\n 
           - name: RADOSGW_TMPURL_KEY\n              value: {{ .Values.endpoints.ceph_object_store.auth.glance.tmpurlkey | quote }}\n            {{ end }}\n            {{- if eq .Values.storage \"swift\" }}\n            - name: SWIFT_TMPURL_KEY\n              value: {{ .Values.endpoints.object_store.auth.glance.tmpurlkey | quote }}\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.glance \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n            {{ end }}\n          command:\n            - /tmp/storage-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: glance-bin\n              mountPath: /tmp/storage-init.sh\n              subPath: storage-init.sh\n              readOnly: true\n            {{ if or (eq .Values.storage \"rbd\") (eq .Values.storage \"radosgw\") }}\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            {{- if empty .Values.conf.ceph.admin_keyring }}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{ end }}\n            {{ end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.image.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: glance-bin\n          configMap:\n            name: glance-bin\n            defaultMode: 0555\n        {{ if or (eq .Values.storage \"rbd\") (eq .Values.storage \"radosgw\") }}\n        - name: etcceph\n          emptyDir: {}\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n            
defaultMode: 0444\n        {{- if empty .Values.conf.ceph.admin_keyring }}\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.ceph_client.user_secret_name }}\n        {{ end }}\n        {{ end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.image.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/network_policy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"glance\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "glance/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: glance-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"glance\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/pod-rally-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.pod_rally_test }}\n{{- $envAll := . }}\n\n{{- $mounts_tests := .Values.pod.mounts.glance_tests.glance_tests }}\n{{- $mounts_tests_init := .Values.pod.mounts.glance_tests.init_container }}\n\n{{- $serviceAccountName := print $envAll.deployment_name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ print $envAll.Chart.Name \"-test\" }}\n  labels:\n{{ tuple $envAll \"glance\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n{{ dict \"envAll\" $envAll \"podName\" \"glance-test\" \"containerNames\" (list \"init\" \"glance-test\" \"glance-test-ks-user\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n{{ if $envAll.Values.pod.tolerations.glance.enabled }}\n{{ tuple $envAll \"glance\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" 
| include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  restartPolicy: Never\n{{ tuple \"glance_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 2 }}\n{{ tuple \"glance_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n    - name: glance-test-ks-user\n{{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"glance_test_ks_user\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      command:\n        - /tmp/ks-user.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: glance-bin\n          mountPath: /tmp/ks-user.sh\n          subPath: ks-user.sh\n          readOnly: true\n{{ dict \"enabled\" .Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.image.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 8 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_SERVICE_NAME\n          value: \"test\"\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_ROLE\n          value: {{ .Values.endpoints.identity.auth.test.role | quote }}\n  containers:\n    - name: glance-test\n{{ tuple $envAll 
\"test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"glance_test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: RALLY_ENV_NAME\n          value: {{.Chart.Name}}\n      command:\n        - /tmp/rally-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: glance-etc\n          mountPath: /etc/rally/rally_tests.yaml\n          subPath: rally_tests.yaml\n          readOnly: true\n        - name: glance-bin\n          mountPath: /tmp/rally-test.sh\n          subPath: rally-test.sh\n          readOnly: true\n        - name: rally-db\n          mountPath: /var/lib/rally\n        - name: rally-work\n          mountPath: /home/rally/.rally\n{{ dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.image.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 8 }}\n{{ if $mounts_tests.volumeMounts }}{{ toYaml $mounts_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: glance-etc\n      secret:\n        secretName: glance-etc\n        defaultMode: 0444\n    - name: glance-bin\n      configMap:\n        name: glance-bin\n        defaultMode: 0555\n    - name: rally-db\n      emptyDir: {}\n    - name: rally-work\n      emptyDir: {}\n{{- dict \"enabled\" .Values.manifests.certificates 
\"name\" .Values.secrets.tls.image.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{ if $mounts_tests.volumes }}{{ toYaml $mounts_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/pvc-images.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pvc_images }}\n{{- $envAll := . }}\n{{- if eq .Values.storage \"pvc\" }}\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: glance-images\n  {{- if .Values.keep_pvc }}\n  annotations:\n    helm.sh/resource-policy: keep\n  {{- end }}\nspec:\n  accessModes: {{ .Values.volume.accessModes }}\n  resources:\n    requests:\n      storage: {{ .Values.volume.size }}\n  storageClassName: {{ .Values.volume.class_name }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"glance\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"image\" ) }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"glance\" \"test\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- $rabbitmqProtocol := \"http\" }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- $rabbitmqProtocol = \"https\" }}\n{{- end }}\n{{- range $key1, $userClass := tuple \"admin\" \"glance\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass $rabbitmqProtocol $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"image\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: g-api\n      port: {{ tuple \"image\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"glance\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "glance/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"image\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "glance/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for glance.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n# radosgw, rbd, swift or pvc\n---\nstorage: swift\n\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nrelease_group: null\n\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    glance_storage_init: quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\n    glance_metadefs_load: quay.io/airshipit/glance:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    glance_db_sync: quay.io/airshipit/glance:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    glance_api: quay.io/airshipit/glance:2025.1-ubuntu_noble\n    # Bootstrap image requires curl\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: 
quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nbootstrap:\n  enabled: true\n  ks_user: admin\n  script: null\n  structured:\n    images:\n      cirros:\n        id: null\n        name: \"Cirros 0.6.2 64-bit\"\n        source_url: \"http://download.cirros-cloud.net/0.6.2/\"\n        image_file: \"cirros-0.6.2-x86_64-disk.img\"\n        min_disk: 1\n        image_type: qcow2\n        container_format: bare\n        private: true\n        properties:\n          # NOTE: If you want to restrict hypervisor type for this image,\n          # uncomment this and write specific hypervisor type.\n          # hypervisor_type: \"qemu\"\n          os_distro: \"cirros\"\n\nceph_client:\n  configmap: ceph-etc\n  user_secret_name: pvc-ceph-client-key\n\nnetwork_policy:\n  glance:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nconf:\n  software:\n    rbd:\n      rbd_store_pool_app_name: glance-image\n  rally_tests:\n    run_tempest: false\n    tests:\n      GlanceImages.create_and_delete_image:\n        - args:\n            container_format: bare\n            disk_format: qcow2\n            image_location: http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      GlanceImages.create_and_list_image:\n        - args:\n            container_format: bare\n            disk_format: qcow2\n            image_location: http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n  ceph:\n    monitors: []\n    admin_keyring: null\n    override:\n    
append:\n  ceph_client:\n    override:\n    append:\n  paste:\n    pipeline:glance-api:\n      pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context rootapp\n    pipeline:glance-api-caching:\n      pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context cache rootapp\n    pipeline:glance-api-cachemanagement:\n      pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context cache cachemanage rootapp\n    pipeline:glance-api-keystone:\n      pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler authtoken audit context  rootapp\n    pipeline:glance-api-keystone+caching:\n      pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler authtoken audit context cache rootapp\n    pipeline:glance-api-keystone+cachemanagement:\n      pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler authtoken audit context cache cachemanage rootapp\n    pipeline:glance-api-trusted-auth:\n      pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler context rootapp\n    pipeline:glance-api-trusted-auth+cachemanagement:\n      pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler context cache cachemanage rootapp\n    composite:rootapp:\n      paste.composite_factory: glance.api:root_app_factory\n      /: apiversions\n      /v1: apiv1app\n      /v2: apiv2app\n    app:apiversions:\n      paste.app_factory: glance.api.versions:create_resource\n    app:apiv1app:\n      paste.app_factory: glance.api.v1.router:API.factory\n    app:apiv2app:\n      paste.app_factory: glance.api.v2.router:API.factory\n    filter:healthcheck:\n      paste.filter_factory: oslo_middleware:Healthcheck.factory\n      backends: disable_by_file\n      disable_by_file_path: /etc/glance/healthcheck_disable\n    filter:versionnegotiation:\n      paste.filter_factory: 
glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory\n    filter:cache:\n      paste.filter_factory: glance.api.middleware.cache:CacheFilter.factory\n    filter:cachemanage:\n      paste.filter_factory: glance.api.middleware.cache_manage:CacheManageFilter.factory\n    filter:context:\n      paste.filter_factory: glance.api.middleware.context:ContextMiddleware.factory\n    filter:unauthenticated-context:\n      paste.filter_factory: glance.api.middleware.context:UnauthenticatedContextMiddleware.factory\n    filter:authtoken:\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n      delay_auth_decision: true\n    filter:audit:\n      paste.filter_factory: keystonemiddleware.audit:filter_factory\n      audit_map_file: /etc/glance/api_audit_map.conf\n    filter:gzip:\n      paste.filter_factory: glance.api.middleware.gzip:GzipMiddleware.factory\n    filter:osprofiler:\n      paste.filter_factory: osprofiler.web:WsgiMiddleware.factory\n      hmac_keys: SECRET_KEY  # DEPRECATED\n      enabled: yes  # DEPRECATED\n    filter:cors:\n      paste.filter_factory: oslo_middleware.cors:filter_factory\n      oslo_config_project: glance\n      oslo_config_program: glance-api\n    filter:http_proxy_to_wsgi:\n      paste.filter_factory: oslo_middleware:HTTPProxyToWSGI.factory\n  policy: {}\n  glance_sudoers: |\n    # This sudoers file supports rootwrap for both Kolla and LOCI Images.\n    Defaults !requiretty\n    Defaults secure_path=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin\"\n    glance ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/glance-rootwrap /etc/glance/rootwrap.conf *, /var/lib/openstack/bin/glance-rootwrap /etc/glance/rootwrap.conf *\n  rootwrap: |\n    # Configuration for glance-rootwrap\n    # This file should be owned by (and only-writable by) the root user\n\n    [DEFAULT]\n    # List of directories to load filter definitions from (separated by 
',').\n    # These directories MUST all be only writeable by root !\n    filters_path=/etc/glance/rootwrap.d,/usr/share/glance/rootwrap\n\n    # List of directories to search executables in, in case filters do not\n    # explicitely specify a full path (separated by ',')\n    # If not specified, defaults to system PATH environment variable.\n    # These directories MUST all be only writeable by root !\n    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin\n\n    # Enable logging to syslog\n    # Default value is False\n    use_syslog=False\n\n    # Which syslog facility to use.\n    # Valid values include auth, authpriv, syslog, local0, local1...\n    # Default value is 'syslog'\n    syslog_log_facility=syslog\n\n    # Which messages to log.\n    # INFO means log all usage\n    # ERROR means only log unsuccessful attempts\n    syslog_log_level=ERROR\n  rootwrap_filters:\n    glance_cinder_store:\n      pods:\n        - api\n      content: |\n        # glance-rootwrap command filters for glance cinder store\n        # This file should be owned by (and only-writable by) the root user\n\n        [Filters]\n        # cinder store driver\n        disk_chown: RegExpFilter, chown, root, chown, \\d+, /dev/(?!.*/\\.\\.).*\n\n        # os-brick library commands\n        # os_brick.privileged.run_as_root oslo.privsep context\n        # This line ties the superuser privs with the config files, context name,\n        # and (implicitly) the actual python code invoked.\n        privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\\.\\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*\n\n        chown: CommandFilter, chown, root\n        mount: CommandFilter, mount, root\n        umount: CommandFilter, umount, root\n  glance:\n    DEFAULT:\n      log_config_append: /etc/glance/logging.conf\n      # NOTE(portdirect): the bind port should not 
be defined, and is manipulated\n      # via the endpoints section.\n      bind_port: null\n      workers: 1\n      enable_v1_api: False\n    oslo_middleware:\n      enable_proxy_headers_parsing: true\n    keystone_authtoken:\n      service_token_roles: service\n      service_token_roles_required: true\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n      service_type: image\n    glance_store:\n      # Since 2024.1 this section must contain the only key 'default_backend'.\n      # Other keys should be defined in the corresponding per-backend sections.\n      # This is for backward compatibility.\n      filesystem_store_datadir: /var/lib/glance/images\n      cinder_catalog_info: volumev3::internalURL\n      rbd_store_chunk_size: 8\n      rbd_store_replication: 3\n      rbd_store_crush_rule: replicated_rule\n      rbd_store_pool: glance.images\n      rbd_store_user: glance\n      rbd_store_ceph_conf: /etc/ceph/ceph.conf\n      default_swift_reference: ref1\n      swift_store_container: glance\n      swift_store_create_container_on_put: true\n      swift_store_config_file: /etc/glance/swift-store.conf\n      swift_store_endpoint_type: internalURL\n    file:\n      filesystem_store_datadir: /var/lib/glance/images\n    # These two sections os_glance_tasks_store and os_glance_staging_store\n    # are mandatory. 
Glance will be unable to delete images from if these\n    # two are not properly configured.\n    os_glance_tasks_store:\n      filesystem_store_datadir: /var/lib/glance/tmp/os_glance_tasks_store\n    os_glance_staging_store:\n      filesystem_store_datadir: /var/lib/glance/tmp/os_glance_staging_store\n    rbd:\n      rbd_store_chunk_size: 8\n      rbd_store_replication: 3\n      rbd_store_crush_rule: replicated_rule\n      rbd_store_pool: glance.images\n      rbd_store_user: glance\n      rbd_store_ceph_conf: /etc/ceph/ceph.conf\n    swift:\n      default_swift_reference: ref1\n      swift_store_container: glance\n      swift_store_create_container_on_put: true\n      swift_store_config_file: /etc/glance/swift-store.conf\n      swift_store_endpoint_type: internalURL\n    paste_deploy:\n      flavor: keystone\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. 
when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    oslo_concurrency:\n      lock_path: /var/lock\n    oslo_messaging_notifications:\n      driver: messagingv2\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: true\n    oslo_policy:\n      policy_file: /etc/glance/policy.yaml\n    cors: {}\n  logging:\n    loggers:\n      keys:\n        - root\n        - glance\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_glance:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: glance\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n  api_audit_map:\n    DEFAULT:\n      target_endpoint_type: None\n    path_keywords:\n      detail: None\n      file: None\n      images: image\n      members: member\n      tags: tag\n    service_endpoints:\n      image: 
'service/storage/image'\n  swift_store: |\n    [{{ .Values.conf.glance.glance_store.default_swift_reference }}]\n    {{- if eq .Values.storage \"radosgw\" }}\n    auth_version = 1\n    auth_address = {{ tuple \"ceph_object_store\" \"public\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\n    user = {{ .Values.endpoints.ceph_object_store.auth.glance.username }}:swift\n    key = {{ .Values.endpoints.ceph_object_store.auth.glance.password }}\n    {{- else }}\n    user = {{ .Values.endpoints.identity.auth.glance.project_name }}:{{ .Values.endpoints.identity.auth.glance.username }}\n    key = {{ .Values.endpoints.identity.auth.glance.password }}\n    auth_address = {{ tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\n    user_domain_name = {{ .Values.endpoints.identity.auth.glance.user_domain_name }}\n    project_domain_name = {{ .Values.endpoints.identity.auth.glance.project_domain_name }}\n    auth_version = 3\n    # NOTE(portdirect): https://bugs.launchpad.net/glance-store/+bug/1620999\n    project_domain_id =\n    user_domain_id =\n    {{- end -}}\n  rabbitmq:\n    # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones\n    policies:\n      - vhost: \"glance\"\n        name: \"ha_ttl_glance\"\n        definition:\n          # mirror messges to other nodes in rmq cluster\n          ha-mode: \"all\"\n          ha-sync-mode: \"automatic\"\n          # 70s\n          message-ttl: 70000\n        priority: 0\n        apply-to: all\n        pattern: '^(?!(amq\\.|reply_)).*'\n  glance_api_uwsgi:\n    uwsgi:\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      
procname-prefix-spaced: \"glance-api:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      wsgi-file: /var/lib/openstack/bin/glance-wsgi-api\n      stats: 0.0.0.0:1717\n      stats-http: true\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        nginx.ingress.kubernetes.io/proxy-body-size: \"0\"\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30092\n\nvolume:\n  class_name: general\n  size: 2Gi\n  accessModes:\n    - ReadWriteOnce\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - glance-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - glance-storage-init\n        - glance-db-sync\n        - glance-rabbit-init\n        - glance-ks-user\n        - glance-ks-endpoints\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n    bootstrap:\n      jobs: null\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: image\n    clean:\n      jobs: null\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - glance-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    ks_endpoints:\n      jobs:\n        - glance-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n 
     services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n    storage_init:\n      jobs:\n        - glance-ks-user\n      services: null\n    metadefs_load:\n      jobs:\n        - glance-db-sync\n      services: null\n    tests:\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: image\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: glance-keystone-admin\n    glance: glance-keystone-user\n    test: glance-keystone-test\n  oslo_db:\n    admin: glance-db-admin\n    glance: glance-db-user\n  rbd: images-rbd-keyring\n  oslo_messaging:\n    admin: glance-rabbitmq-admin\n    glance: glance-rabbitmq-user\n  tls:\n    image:\n      api:\n        public: glance-tls-public\n        internal: glance-tls-api\n  oci_image_registry:\n    glance: glance-oci-image-registry\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      glance:\n        username: glance\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      
registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      glance:\n        role: admin\n        region_name: RegionOne\n        username: glance\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      test:\n        role: admin\n        region_name: RegionOne\n        username: glance-test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  image:\n    name: glance\n    hosts:\n      default: glance-api\n      public: glance\n    host_fqdn_override:\n      default: null\n      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: null\n      healthcheck: /healthcheck\n    scheme:\n      default: http\n      service: http\n    port:\n      api:\n        default: 9292\n        public: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      glance:\n        username: glance\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /glance\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used 
to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n        secret:\n          tls:\n            internal: rabbitmq-tls-direct\n      glance:\n        username: glance\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /glance\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  object_store:\n    name: swift\n    namespace: ceph\n    auth:\n      glance:\n        tmpurlkey: supersecret\n    hosts:\n      default: ceph-rgw\n      public: radosgw\n    host_fqdn_override:\n      default: null\n    path:\n      default: /swift/v1/KEY_$(tenant_id)s\n    scheme:\n      default: http\n    port:\n      api:\n        default: 8088\n        public: 80\n  ceph_object_store:\n    name: radosgw\n    namespace: ceph\n    auth:\n      glance:\n        username: glance\n        password: password\n        tmpurlkey: supersecret\n    hosts:\n      default: ceph-rgw\n      public: radosgw\n    host_fqdn_override:\n      default: null\n    path:\n      default: /auth/v1.0\n    scheme:\n      default: http\n    port:\n      api:\n        default: 8088\n        public: 80\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 
24224\n      metrics:\n        default: 24220\n  dashboard:\n    name: horizon\n    hosts:\n      default: horizon-int\n      public: horizon\n    host_fqdn_override:\n      default: null\n      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: null\n    scheme:\n      default: http\n      public: https\n    port:\n      web:\n        default: 80\n        public: 443\n  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress\n  # They are used to enable the Egress K8s network policy.\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns:\n        default: 53\n        protocol: UDP\n  ingress:\n    namespace: null\n    name: ingress\n    hosts:\n      default: ingress\n    port:\n      ingress:\n        default: 80\n\npod:\n  security_context:\n    glance:\n      pod:\n        runAsUser: 42424\n      container:\n        glance_perms:\n          readOnlyRootFilesystem: true\n          runAsUser: 0\n        ceph_keyring_placement:\n          readOnlyRootFilesystem: true\n          runAsUser: 0\n        glance_api:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nginx:\n          readOnlyRootFilesystem: false\n          runAsUser: 0\n    clean:\n      pod:\n        runAsUser: 42424\n      container:\n        glance_secret_clean:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    metadefs_load:\n      pod:\n        runAsUser: 42424\n      container:\n        glance_metadefs_load:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    storage_init:\n      pod:\n        
runAsUser: 42424\n      container:\n        ceph_keyring_placement:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        glance_storage_init:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    test:\n      pod:\n        runAsUser: 42424\n      container:\n        glance_test_ks_user:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        glance_test:\n          runAsUser: 65500\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    glance:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  useHostNetwork:\n    api: false\n  mounts:\n    glance_api:\n      init_container: null\n      glance_api:\n        volumeMounts:\n        volumes:\n    glance_tests:\n      init_container: null\n      glance_tests:\n        volumeMounts:\n        volumes:\n    glance_db_sync:\n      glance_db_sync:\n        volumeMounts:\n        volumes:\n  # -- This allows users to add Kubernetes Projected Volumes to be mounted at /etc/glance/glance.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/daemonset/cronjob\n  ## https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    glance_api: []\n    glance_db_sync: []\n    glance_metadefs_load: []\n  replicas:\n    api: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 
3\n    disruption_budget:\n      api:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  probes:\n    api:\n      glance-api:\n        readiness:\n          enabled: true\n          params:\n            periodSeconds: 10\n            timeoutSeconds: 5\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 5\n            periodSeconds: 10\n            timeoutSeconds: 5\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      storage_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      metadefs_load:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n 
         memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\ntls:\n  identity: false\n  oslo_messaging: false\n  oslo_db: false\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  deployment_api: true\n  ingress_api: true\n  job_bootstrap: true\n  job_clean: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_image_repo_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_storage_init: true\n  job_metadefs_load: true\n  job_rabbit_init: true\n  pdb_api: true\n  pod_rally_test: true\n  pvc_images: true\n  network_policy: false\n  secret_db: true\n  secret_ingress_tls: true\n  secret_keystone: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_ingress_api: true\n  service_api: true\n\n# NOTE: This enables the helm resource-policy to keep the glance-images PVC.\n# set keep_pvc: true to allow the helm resource-policy to keep the PVC.\n# This will require manual deletion of the PVC.\n# set keep_pvc: false to disallow the helm resource-policy from keeping the PVC.\n# This will allow helm to delete the PVC.\nkeep_pvc: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #      
 objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "gnocchi/.helmignore",
    "content": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation (prefixed with !). Only one pattern per line.\n.DS_Store\n# Common VCS dirs\n.git/\n.gitignore\n.bzr/\n.bzrignore\n.hg/\n.hgignore\n.svn/\n# Common backup files\n*.swp\n*.bak\n*.tmp\n*~\n# Various IDEs\n.project\n.idea/\n*.tmproj\n"
  },
  {
    "path": "gnocchi/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v3.0.3\ndescription: OpenStack-Helm Gnocchi\nname: gnocchi\nversion: 2025.2.0\nhome: https://gnocchi.xyz/\nicon: https://gnocchi.xyz/_static/gnocchi-logo.png\nsources:\n  - https://github.com/gnocchixyz/gnocchi\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "gnocchi/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "gnocchi/templates/bin/_ceph-admin-keyring.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncat <<EOF > /etc/ceph/ceph.client.admin.keyring\n[client.admin]\n{{- if .Values.conf.ceph.admin_keyring }}\n    key = {{ .Values.conf.ceph.admin_keyring }}\n{{- else }}\n    key = $(cat /tmp/client-keyring)\n{{- end }}\nEOF\n\nexit 0\n"
  },
  {
    "path": "gnocchi/templates/bin/_ceph-keyring.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncat <<EOF > /etc/ceph/ceph.client.{{ .Values.conf.gnocchi.storage.ceph_username }}.keyring\n\n[client.{{ .Values.conf.gnocchi.storage.ceph_username }}]\n{{- if .Values.conf.gnocchi.storage.provided_keyring }}\n    key = {{ .Values.conf.gnocchi.storage.provided_keyring }}\n{{- else }}\n    key = $(cat /tmp/client-keyring)\n{{- end }}\nEOF\n\nexit 0\n"
  },
  {
    "path": "gnocchi/templates/bin/_clean-secrets.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec kubectl delete secret \\\n  --namespace ${NAMESPACE} \\\n  --ignore-not-found=true \\\n  ${RBD_POOL_SECRET}\n"
  },
  {
    "path": "gnocchi/templates/bin/_db-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\npgsql_superuser_cmd () {\n  DB_COMMAND=\"$1\"\n  if [[ ! -z $2 ]]; then\n      export PGDATABASE=$2\n  fi\n  if [[ ! -z \"${ROOT_DB_PASS}\" ]]; then\n      export PGPASSWORD=\"${ROOT_DB_PASS}\"\n  fi\n  psql \\\n  -h ${DB_FQDN} \\\n  -p ${DB_PORT} \\\n  -U ${ROOT_DB_USER} \\\n  --command=\"${DB_COMMAND}\"\n  unset PGPASSWORD\n}\n\nif [[ ! -v ROOT_DB_CONNECTION ]]; then\n    echo \"environment variable ROOT_DB_CONNECTION not set\"\n    exit 1\nelse\n    echo \"Got DB root connection\"\nfi\n\nif [[ -v OPENSTACK_CONFIG_FILE ]]; then\n    if [[ ! -v OPENSTACK_CONFIG_DB_SECTION ]]; then\n        echo \"Environment variable OPENSTACK_CONFIG_DB_SECTION not set\"\n        exit 1\n    elif [[ ! 
-v OPENSTACK_CONFIG_DB_KEY ]]; then\n        echo \"Environment variable OPENSTACK_CONFIG_DB_KEY not set\"\n        exit 1\n    fi\n\n    echo \"Using ${OPENSTACK_CONFIG_FILE} as db config source\"\n    echo \"Trying to load db config from ${OPENSTACK_CONFIG_DB_SECTION}:${OPENSTACK_CONFIG_DB_KEY}\"\n\n    DB_CONN=$(awk -v key=$OPENSTACK_CONFIG_DB_KEY \"/^\\[${OPENSTACK_CONFIG_DB_SECTION}\\]/{f=1} f==1&&/^$OPENSTACK_CONFIG_DB_KEY/{print \\$3;exit}\" \"${OPENSTACK_CONFIG_FILE}\")\n\n    echo \"Found DB connection: $DB_CONN\"\nelif [[ -v DB_CONNECTION ]]; then\n    DB_CONN=${DB_CONNECTION}\n    echo \"Got config from DB_CONNECTION env var\"\nelse\n    echo \"Could not get dbconfig\"\n    exit 1\nfi\n\nROOT_DB_PROTO=\"$(echo $ROOT_DB_CONNECTION | grep '//' | sed -e's,^\\(.*://\\).*,\\1,g')\"\nROOT_DB_URL=\"$(echo $ROOT_DB_CONNECTION | sed -e s,$ROOT_DB_PROTO,,g)\"\nROOT_DB_USER=\"$(echo $ROOT_DB_URL | grep @ | cut -d@ -f1 | cut -d: -f1)\"\nROOT_DB_PASS=\"$(echo $ROOT_DB_URL | grep @ | cut -d@ -f1 | cut -d: -f2)\"\n\nDB_FQDN=\"$(echo $ROOT_DB_URL | sed -e s,$ROOT_DB_USER:$ROOT_DB_PASS@,,g | cut -d/ -f1 | cut -d: -f1)\"\nDB_PORT=\"$(echo $ROOT_DB_URL | sed -e s,$ROOT_DB_USER:$ROOT_DB_PASS@,,g | cut -d/ -f1 | cut -d: -f2)\"\nDB_NAME=\"$(echo $ROOT_DB_URL | sed -e s,$ROOT_DB_USER:$ROOT_DB_PASS@,,g | cut -d/ -f2 | cut -d? 
-f1)\"\n\nDB_PROTO=\"$(echo $DB_CONN | grep '//' | sed -e's,^\\(.*://\\).*,\\1,g')\"\nDB_URL=\"$(echo $DB_CONN | sed -e s,$DB_PROTO,,g)\"\nDB_USER=\"$( echo $DB_URL | grep @ | cut -d@ -f1 | cut -d: -f1)\"\nDB_PASS=\"$( echo $DB_URL | grep @ | cut -d@ -f1 | cut -d: -f2)\"\n\n#create db\npgsql_superuser_cmd \"SELECT 1 FROM pg_database WHERE datname = '$DB_NAME'\" | grep -q 1 || pgsql_superuser_cmd \"CREATE DATABASE $DB_NAME\"\n\n#create db user\npgsql_superuser_cmd \"SELECT * FROM pg_roles WHERE rolname = '$DB_USER';\" | tail -n +3 | head -n -2 | grep -q 1 || \\\n    pgsql_superuser_cmd \"CREATE ROLE ${DB_USER} LOGIN PASSWORD '$DB_PASS';\" && pgsql_superuser_cmd \"ALTER USER ${DB_USER} WITH SUPERUSER\"\n\n#give permissions to user\npgsql_superuser_cmd \"GRANT ALL PRIVILEGES ON DATABASE $DB_NAME to $DB_USER;\"\n\n"
  },
  {
    "path": "gnocchi/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec gnocchi-upgrade\n"
  },
  {
    "path": "gnocchi/templates/bin/_gnocchi-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  if [ -f /etc/apache2/envvars ]; then\n    # Loading Apache2 ENV variables\n    source /etc/apache2/envvars\n  fi\n  exec apache2 -DFOREGROUND\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "gnocchi/templates/bin/_gnocchi-metricd.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\nexec gnocchi-metricd \\\n      --config-file /etc/gnocchi/gnocchi.conf\n"
  },
  {
    "path": "gnocchi/templates/bin/_gnocchi-resources-cleaner.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\necho \"Purging the deleted resources with its associated metrics which have lived more than ${DELETED_RESOURCES_TTL}\"\ngnocchi resource batch delete \"ended_at < '-${DELETED_RESOURCES_TTL}'\"\n\nexit 0\n"
  },
  {
    "path": "gnocchi/templates/bin/_gnocchi-statsd.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\nexec gnocchi-statsd \\\n      --config-file /etc/gnocchi/gnocchi.conf\n"
  },
  {
    "path": "gnocchi/templates/bin/_gnocchi-test.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\necho \"Test: list archive policies\"\ngnocchi archive-policy list\n\necho \"Test: create metric\"\ngnocchi metric create --archive-policy-name low\nMETRIC_UUID=$(gnocchi metric list -c id -f value | head -1)\nsleep 5\n\necho \"Test: show metric\"\ngnocchi metric show ${METRIC_UUID}\n\nsleep 5\n\necho \"Test: add measures\"\ngnocchi measures add -m 2017-06-27T12:00:00@31 \\\n  -m 2017-06-27T12:03:27@20 \\\n  -m 2017-06-27T12:06:51@41 \\\n  ${METRIC_UUID}\n\nsleep 15\n\necho \"Test: show measures\"\ngnocchi measures show ${METRIC_UUID}\ngnocchi measures show --aggregation min ${METRIC_UUID}\n\necho \"Test: delete metric\"\ngnocchi metric delete ${METRIC_UUID}\n\nRESOURCE_UUID={{ uuidv4 }}\n\necho \"Test: create resource type\"\ngnocchi resource-type create --attribute name:string --attribute host:string test\n\necho \"Test: list resource types\"\ngnocchi resource-type list\n\necho \"Test: create resource\"\ngnocchi resource create --attribute name:test --attribute host:testnode1 --create-metric cpu:medium --create-metric memory:low --type test ${RESOURCE_UUID}\n\necho \"Test: show resource history\"\ngnocchi resource history --format json --details ${RESOURCE_UUID}\necho \"Test: delete resource\"\ngnocchi resource delete ${RESOURCE_UUID}\necho \"Test: delete resource type\"\ngnocchi resource-type delete test\n\nexit 0\n"
  },
  {
    "path": "gnocchi/templates/bin/_storage-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\nSECRET=$(mktemp --suffix .yaml)\nKEYRING=$(mktemp --suffix .keyring)\nfunction cleanup {\n    rm -f ${SECRET} ${KEYRING}\n}\ntrap cleanup EXIT\n\nset -ex\nceph -s\nfunction ensure_pool () {\n  ceph osd pool stats $1 || ceph osd pool create $1 $2\n  local test_version=$(ceph tell osd.* version | egrep -c \"nautilus|mimic|luminous\" | xargs echo)\n  if [[ ${test_version} -gt 0 ]]; then\n    ceph osd pool application enable $1 $3\n  fi\n}\nensure_pool ${RBD_POOL_NAME} ${RBD_POOL_CHUNK_SIZE} \"gnocchi-metrics\"\n\nif USERINFO=$(ceph auth get client.${RBD_POOL_USER}); then\n  echo \"Cephx user client.${RBD_POOL_USER} already exist.\"\n  echo \"Update its cephx caps\"\n  ceph auth caps client.${RBD_POOL_USER} \\\n    mon \"profile rbd\" \\\n    osd \"profile rbd pool=${RBD_POOL_NAME}\" \\\n    mgr \"allow r\"\n  ceph auth get client.${RBD_POOL_USER} -o ${KEYRING}\nelse\n  ceph auth get-or-create client.${RBD_POOL_USER} \\\n    mon \"profile rbd\" \\\n    osd \"profile rbd pool=${RBD_POOL_NAME}\" \\\n    mgr \"allow r\" \\\n    -o ${KEYRING}\nfi\n\nENCODED_KEYRING=$(sed -n 's/^[[:blank:]]*key[[:blank:]]\\+=[[:blank:]]\\(.*\\)/\\1/p' ${KEYRING} | base64 -w0)\ncat > ${SECRET} <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: \"${RBD_POOL_SECRET}\"\ntype: kubernetes.io/rbd\ndata:\n  key: $( echo ${ENCODED_KEYRING} )\nEOF\nkubectl apply --namespace ${NAMESPACE} -f ${SECRET}\n"
  },
  {
    "path": "gnocchi/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: gnocchi-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  storage-init.sh: |\n{{ tuple \"bin/_storage-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  clean-secrets.sh: |\n{{ tuple \"bin/_clean-secrets.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-init.sh: |\n{{ tuple \"bin/_db-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  gnocchi-api.sh: |\n{{ tuple \"bin/_gnocchi-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  gnocchi-metricd.sh: |\n{{ tuple \"bin/_gnocchi-metricd.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  gnocchi-statsd.sh: |\n{{ tuple \"bin/_gnocchi-statsd.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  gnocchi-resources-cleaner.sh: |\n{{ tuple \"bin/_gnocchi-resources-cleaner.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  ks-domain-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_domain_user\" . | indent 4 }}\n  ceph-keyring.sh: |\n{{ tuple \"bin/_ceph-keyring.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ceph-admin-keyring.sh: |\n{{ tuple \"bin/_ceph-admin-keyring.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  gnocchi-test.sh: |\n{{ tuple \"bin/_gnocchi-test.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n\n{{- if empty .Values.conf.gnocchi.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.gnocchi.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.gnocchi.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.gnocchi.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.gnocchi.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.gnocchi.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.gnocchi.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.gnocchi.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if empty .Values.conf.gnocchi.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.gnocchi.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.gnocchi.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.gnocchi.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.gnocchi.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.gnocchi.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.gnocchi.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.gnocchi.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.gnocchi.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.gnocchi.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.gnocchi.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.gnocchi.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.gnocchi.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.gnocchi.keystone_authtoken \"username\" .Values.endpoints.identity.auth.gnocchi.username -}}\n{{- end -}}\n{{- if empty .Values.conf.gnocchi.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.gnocchi.keystone_authtoken \"password\" .Values.endpoints.identity.auth.gnocchi.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.gnocchi.DEFAULT.coordination_url -}}\n{{- $endpointUrl := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n{{- $driver := .Values.endpoints.oslo_cache.hosts.default -}}\n{{- $_ := printf \"%s://%s\" $driver $endpointUrl | set .Values.conf.gnocchi.DEFAULT \"coordination_url\" -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.gnocchi.database.connection)) (empty .Values.conf.gnocchi.database.connection) -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"gnocchi\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | set .Values.conf.gnocchi.database \"connection\" -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.gnocchi.indexer.url)) (empty .Values.conf.gnocchi.indexer.url) -}}\n{{ if eq .Values.conf.gnocchi.indexer.driver \"postgresql\" }}\n{{- $_ := tuple \"oslo_db_postgresql\" \"internal\" \"gnocchi\" \"postgresql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | set .Values.conf.gnocchi.indexer \"url\" -}}\n{{ else }}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"gnocchi\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | set .Values.conf.gnocchi.indexer \"url\" -}}\n{{ end }}\n{{- end -}}\n\n{{- if empty .Values.conf.gnocchi.statsd.resource_id -}}\n{{- $_ := uuidv4 | set .Values.conf.gnocchi.statsd \"resource_id\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.gnocchi.statsd.user_id -}}\n{{- $_ := uuidv4 | set .Values.conf.gnocchi.statsd \"user_id\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.gnocchi.statsd.project_id -}}\n{{- $_ := uuidv4 | set .Values.conf.gnocchi.statsd \"project_id\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.gnocchi.api.port -}}\n{{- $_ := tuple \"metric\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.gnocchi.api \"port\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.gnocchi.statsd.port -}}\n{{- $_ := tuple \"metric_statsd\" \"internal\" \"statsd\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.gnocchi.statsd \"port\" -}}\n{{- end -}}\n\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: gnocchi-etc\ntype: Opaque\ndata:\n  gnocchi.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.gnocchi | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.paste | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.apache \"key\" \"wsgi-gnocchi.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/cron-job-resources-cleaner.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_job_resources_cleaner }}\n{{- $envAll := . }}\n\n{{- $mounts_gnocchi_resources_cleaner := .Values.pod.mounts.gnocchi_resources_cleaner.gnocchi_resources_cleaner }}\n{{- $mounts_gnocchi_resources_cleaner_init := .Values.pod.mounts.gnocchi_resources_cleaner.init_container }}\n\n{{- $serviceAccountName := \"gnocchi-resources-cleaner\" }}\n{{ tuple $envAll \"resources_cleaner\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: gnocchi-resources-cleaner\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"gnocchi\" \"resources-cleaner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  schedule: {{ .Values.jobs.resources_cleaner.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.resources_cleaner.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.resources_cleaner.history.failed }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"gnocchi\" \"resources-cleaner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"gnocchi\" \"resources-cleaner\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n          serviceAccountName: {{ $serviceAccountName }}\n          restartPolicy: OnFailure\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.gnocchi.enabled }}\n{{ tuple $envAll \"gnocchi\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n          initContainers:\n{{ tuple $envAll \"resources_cleaner\" $mounts_gnocchi_resources_cleaner_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          containers:\n            - name: gnocchi-resources-cleaner\n{{ tuple $envAll \"gnocchi_resources_cleaner\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.resources_cleaner | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n              env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 16 }}\n{{- end }}\n                - name: OS_AUTH_TYPE\n                  valueFrom:\n                    secretKeyRef:\n                      name: {{ $.Values.secrets.identity.admin }}\n                      key: OS_AUTH_TYPE\n                - name: OS_TENANT_NAME\n                  valueFrom:\n                    secretKeyRef:\n                      name: {{ $.Values.secrets.identity.admin }}\n                      key: OS_TENANT_NAME\n                - name: DELETED_RESOURCES_TTL\n                  value: {{ .Values.jobs.resources_cleaner.deleted_resources_ttl | quote }}\n              command:\n                - /tmp/gnocchi-resources-cleaner.sh\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - name: gnocchi-bin\n                  mountPath: 
/tmp/gnocchi-resources-cleaner.sh\n                  subPath: gnocchi-resources-cleaner.sh\n                  readOnly: true\n                - name: pod-etc-gnocchi\n                  mountPath: /etc/gnocchi\n                - name: gnocchi-etc\n                  mountPath: /etc/gnocchi/gnocchi.conf\n                  subPath: gnocchi.conf\n                  readOnly: true\n{{ if $mounts_gnocchi_resources_cleaner.volumeMounts }}{{ toYaml $mounts_gnocchi_resources_cleaner.volumeMounts | indent 16 }}{{ end }}\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: pod-etc-gnocchi\n              emptyDir: {}\n            - name: gnocchi-etc\n              secret:\n                secretName: gnocchi-etc\n                defaultMode: 0444\n            - name: gnocchi-bin\n              configMap:\n                name: gnocchi-bin\n                defaultMode: 0555\n{{ if $mounts_gnocchi_resources_cleaner.volumes }}{{ toYaml $mounts_gnocchi_resources_cleaner.volumes | indent 12 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/daemonset-metricd.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.daemonset_metricd }}\n{{- $envAll := . }}\n\n{{- $mounts_gnocchi_metricd := .Values.pod.mounts.gnocchi_metricd.gnocchi_metricd }}\n{{- $mounts_gnocchi_metricd_init := .Values.pod.mounts.gnocchi_metricd.init_container }}\n\n{{- $serviceAccountName := \"gnocchi-metricd\" }}\n{{ tuple $envAll \"metricd\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: gnocchi-metricd\n  labels:\n{{ tuple $envAll \"gnocchi\" \"metricd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"gnocchi\" \"metricd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"metricd\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"gnocchi\" \"metricd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.metricd.node_selector_key }}: {{ .Values.labels.metricd.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.gnocchi.enabled }}\n{{ tuple $envAll \"gnocchi\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"metricd\" $mounts_gnocchi_metricd_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"gnocchi_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          command:\n            - /tmp/ceph-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: gnocchi-bin\n              mountPath: /tmp/ceph-keyring.sh\n              subPath: ceph-keyring.sh\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      containers:\n        - name: gnocchi-metricd\n{{ tuple $envAll \"gnocchi_metricd\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.metricd | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/gnocchi-metricd.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.gnocchi.oslo_concurrency.lock_path }}\n            - name: pod-etc-gnocchi\n              mountPath: /etc/gnocchi\n            - name: gnocchi-etc\n              mountPath: /etc/gnocchi/gnocchi.conf\n              subPath: gnocchi.conf\n              readOnly: true\n            - name: gnocchi-etc\n       
       mountPath: /etc/gnocchi/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: gnocchi-bin\n              mountPath: /tmp/gnocchi-metricd.sh\n              subPath: gnocchi-metricd.sh\n              readOnly: true\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n{{ if $mounts_gnocchi_metricd.volumeMounts }}{{ toYaml $mounts_gnocchi_metricd.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-gnocchi\n          emptyDir: {}\n        - name: gnocchi-etc\n          secret:\n            secretName: gnocchi-etc\n            defaultMode: 0444\n        - name: gnocchi-bin\n          configMap:\n            name: gnocchi-bin\n            defaultMode: 0555\n        - name: etcceph\n          emptyDir: {}\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.secrets.rbd | quote }}\n{{ if $mounts_gnocchi_metricd.volumes }}{{ toYaml $mounts_gnocchi_metricd.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/daemonset-statsd.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.daemonset_statsd }}\n{{- $envAll := . }}\n\n{{- $mounts_gnocchi_statsd := .Values.pod.mounts.gnocchi_statsd.gnocchi_statsd }}\n{{- $mounts_gnocchi_statsd_init := .Values.pod.mounts.gnocchi_statsd.init_container }}\n\n{{- $serviceAccountName := \"gnocchi-statsd\" }}\n{{ tuple $envAll \"statsd\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: gnocchi-statsd\n  labels:\n{{ tuple $envAll \"gnocchi\" \"metricd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"gnocchi\" \"metricd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"gnocchi\" \"metricd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.statsd.node_selector_key }}: {{ .Values.labels.statsd.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.gnocchi.enabled }}\n{{ tuple $envAll \"gnocchi\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"statsd\" $mounts_gnocchi_statsd_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"gnocchi_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          command:\n            - /tmp/ceph-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: gnocchi-bin\n              mountPath: /tmp/ceph-keyring.sh\n              subPath: ceph-keyring.sh\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      containers:\n        - name: gnocchi-statsd\n{{ tuple $envAll \"gnocchi_statsd\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.statsd | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/gnocchi-statsd.sh\n          ports:\n            - name: gn-stats\n              containerPort: {{ tuple \"metric_statsd\" \"internal\" \"statsd\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.gnocchi.oslo_concurrency.lock_path }}\n            - name: pod-etc-gnocchi\n              mountPath: /etc/gnocchi\n            - name: gnocchi-etc\n              mountPath: /etc/gnocchi/gnocchi.conf\n              subPath: gnocchi.conf\n              readOnly: true\n            - name: gnocchi-etc\n              mountPath: /etc/gnocchi/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: gnocchi-etc\n              mountPath: /etc/gnocchi/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: gnocchi-bin\n              mountPath: /tmp/gnocchi-statsd.sh\n              subPath: gnocchi-statsd.sh\n              readOnly: true\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n{{ if $mounts_gnocchi_statsd.volumeMounts }}{{ toYaml $mounts_gnocchi_statsd.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-gnocchi\n          emptyDir: {}\n        - name: gnocchi-etc\n          secret:\n            secretName: gnocchi-etc\n            defaultMode: 0444\n        - name: gnocchi-bin\n          configMap:\n            name: gnocchi-bin\n            defaultMode: 0555\n        - name: etcceph\n          emptyDir: {}\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n        - name: ceph-keyring\n    
      secret:\n            secretName: {{ .Values.secrets.rbd | quote }}\n{{ if $mounts_gnocchi_statsd.volumes }}{{ toYaml $mounts_gnocchi_statsd.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . }}\n\n{{- $mounts_gnocchi_api := .Values.pod.mounts.gnocchi_api.gnocchi_api }}\n{{- $mounts_gnocchi_api_init := .Values.pod.mounts.gnocchi_api.init_container }}\n\n{{- $serviceAccountName := \"gnocchi-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: gnocchi-api\n  labels:\n{{ tuple $envAll \"gnocchi\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"gnocchi\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"gnocchi\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"gnocchi\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.gnocchi.enabled }}\n{{ tuple $envAll \"gnocchi\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_gnocchi_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"gnocchi_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          command:\n            - /tmp/ceph-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: gnocchi-bin\n              mountPath: /tmp/ceph-keyring.sh\n              subPath: ceph-keyring.sh\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      containers:\n        - name: gnocchi-api\n{{ tuple $envAll \"gnocchi_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/gnocchi-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/gnocchi-api.sh\n                  - stop\n          ports:\n            - name: gn-api\n              
containerPort: {{ tuple \"metric\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"metric\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.gnocchi.oslo_concurrency.lock_path }}\n            - name: pod-etc-gnocchi\n              mountPath: /etc/gnocchi\n            - name: gnocchi-etc\n              mountPath: /etc/gnocchi/gnocchi.conf\n              subPath: gnocchi.conf\n              readOnly: true\n            {{- if .Values.conf.enable_paste }}\n            - name: gnocchi-etc\n              mountPath: /etc/gnocchi/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            {{- end }}\n            - name: gnocchi-etc\n              mountPath: /etc/gnocchi/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: gnocchi-etc\n              mountPath: /etc/apache2/conf-enabled/wsgi-gnocchi.conf\n              subPath: wsgi-gnocchi.conf\n              readOnly: true\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            - name: gnocchi-bin\n              mountPath: /tmp/gnocchi-api.sh\n              subPath: gnocchi-api.sh\n              readOnly: true\n{{ if $mounts_gnocchi_api.volumeMounts }}{{ toYaml $mounts_gnocchi_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n  
      - name: pod-etc-gnocchi\n          emptyDir: {}\n        - name: gnocchi-etc\n          secret:\n            secretName: gnocchi-etc\n            defaultMode: 0444\n        - name: gnocchi-bin\n          configMap:\n            name: gnocchi-bin\n            defaultMode: 0555\n        - name: etcceph\n          emptyDir: {}\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.secrets.rbd | quote }}\n{{ if $mounts_gnocchi_api.volumes }}{{ toYaml $mounts_gnocchi_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "gnocchi/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendServiceType\" \"metric\" \"backendPort\" \"gn-api\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"gnocchi\" \"keystoneUser\" .Values.bootstrap.ks_user -}}\n{{- if .Values.pod.tolerations.gnocchi.enabled -}}\n{{- $_ := set $bootstrapJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/job-clean.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_clean }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := print \"gnocchi-clean\" }}\n{{ tuple $envAll \"clean\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - delete\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ print \"gnocchi-clean\" }}\n  labels:\n{{ tuple $envAll \"gnocchi\" \"clean\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": pre-delete\n    \"helm.sh/hook-delete-policy\": hook-succeeded\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"gnocchi\" \"clean\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.gnocchi.enabled }}\n{{ tuple $envAll \"gnocchi\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"clean\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: gnocchi-rbd-secret-clean\n{{ tuple $envAll \"gnocchi_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.clean | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: RBD_POOL_SECRET\n              value: {{ .Values.secrets.rbd | quote }}\n          command:\n            - /tmp/clean-secrets.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: gnocchi-bin\n              mountPath: /tmp/clean-secrets.sh\n              subPath: clean-secrets.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: gnocchi-bin\n          configMap:\n            name: gnocchi-bin\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"gnocchi\" -}}\n{{- if .Values.pod.tolerations.gnocchi.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/job-db-init-indexer.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_init_indexer }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"gnocchi-db-init-indexer\" }}\n{{ tuple $envAll \"db_init_postgresql\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: gnocchi-db-init-indexer\n  labels:\n{{ tuple $envAll \"gnocchi\" \"db-init-indexer\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"gnocchi\" \"db-init-indexer\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.gnocchi.enabled }}\n{{ tuple $envAll \"gnocchi\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"db_init_postgresql\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: gnocchi-db-init-indexer\n{{ tuple $envAll \"db_init_indexer\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_init_indexer | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: ROOT_DB_CONNECTION\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.oslo_db_indexer.admin }}\n                  key: DB_CONNECTION_INDEXER\n            - name: OPENSTACK_CONFIG_FILE\n              value: /etc/gnocchi/gnocchi.conf\n            - name: OPENSTACK_CONFIG_DB_SECTION\n              value: indexer\n            - name: OPENSTACK_CONFIG_DB_KEY\n              value: url\n          command:\n            - /tmp/db-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: gnocchi-etc\n              mountPath: /etc/gnocchi/gnocchi.conf\n              subPath: gnocchi.conf\n            - name: pod-etc-gnocchi\n              mountPath: /etc/gnocchi\n            - name: gnocchi-bin\n              mountPath: /tmp/db-init.sh\n              subPath: db-init.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: gnocchi-etc\n          secret:\n            secretName: gnocchi-etc\n            defaultMode: 0444\n        - name: pod-etc-gnocchi\n          emptyDir: {}\n        - name: gnocchi-bin\n          configMap:\n            name: gnocchi-bin\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"gnocchi\" -}}\n{{- if .Values.pod.tolerations.gnocchi.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"gnocchi-db-sync\" }}\n{{ tuple $envAll \"db_sync\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: gnocchi-db-sync\n  labels:\n{{ tuple $envAll \"gnocchi\" \"db-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"gnocchi\" \"db-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.gnocchi.enabled }}\n{{ tuple $envAll \"gnocchi\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"db_sync\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"gnocchi_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          command:\n            - /tmp/ceph-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: gnocchi-bin\n              mountPath: /tmp/ceph-keyring.sh\n              subPath: ceph-keyring.sh\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      containers:\n        - name: gnocchi-db-sync\n{{ tuple $envAll \"db_sync\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_sync | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/db-sync.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: gnocchi-etc\n              mountPath: /etc/gnocchi/gnocchi.conf\n              subPath: gnocchi.conf\n            - name: gnocchi-bin\n              mountPath: /tmp/db-sync.sh\n              subPath: db-sync.sh\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: gnocchi-etc\n          secret:\n            secretName: gnocchi-etc\n            defaultMode: 0444\n        - name: gnocchi-bin\n          configMap:\n            name: gnocchi-bin\n            defaultMode: 0555\n        - name: etcceph\n          emptyDir: {}\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.secrets.rbd | quote }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"gnocchi\" -}}\n{{- if .Values.pod.tolerations.gnocchi.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksEndpointsJob := dict \"envAll\" . \"serviceName\" \"gnocchi\" \"serviceTypes\" ( tuple \"metric\" ) -}}\n{{- if .Values.pod.tolerations.gnocchi.enabled -}}\n{{- $_ := set $ksEndpointsJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksEndpointsJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"gnocchi\" \"serviceTypes\" ( tuple \"metric\" ) -}}\n{{- if .Values.pod.tolerations.gnocchi.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"gnocchi\" -}}\n{{- if .Values.pod.tolerations.gnocchi.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/job-storage-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_storage_init }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"gnocchi-storage-init\" }}\n{{ tuple $envAll \"storage_init\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: gnocchi-storage-init\n  labels:\n{{ tuple $envAll \"gnocchi\" \"storage-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"gnocchi\" \"storage-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.gnocchi.enabled }}\n{{ tuple $envAll \"gnocchi\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"storage_init\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"gnocchi_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          securityContext:\n            runAsUser: {{ .Values.pod.user.gnocchi.uid }}\n          command:\n            - /tmp/ceph-admin-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: gnocchi-bin\n              mountPath: /tmp/ceph-admin-keyring.sh\n              subPath: ceph-admin-keyring.sh\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      containers:\n        - name: gnocchi-storage-init\n{{ tuple $envAll \"gnocchi_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.storage_init | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: RBD_POOL_NAME\n              value: {{ .Values.conf.gnocchi.storage.ceph_pool | quote }}\n            - name: RBD_POOL_USER\n              value: {{ .Values.conf.gnocchi.storage.ceph_username | quote }}\n            - name: RBD_POOL_CHUNK_SIZE\n              value: \"8\"\n            - name: RBD_POOL_SECRET\n              value: {{ .Values.secrets.rbd | quote }}\n          command:\n            - /tmp/storage-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: gnocchi-bin\n              mountPath: /tmp/storage-init.sh\n              subPath: storage-init.sh\n              readOnly: true\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: gnocchi-bin\n          configMap:\n            name: gnocchi-bin\n            defaultMode: 0555\n        - name: etcceph\n          emptyDir: {}\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n            defaultMode: 0444\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.ceph_client.user_secret_name }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: gnocchi-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"gnocchi\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/pod-gnocchi-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pod_gnocchi_test }}\n{{- $envAll := . }}\n\n{{- $mounts_gnocchi_tests := .Values.pod.mounts.gnocchi_tests.gnocchi_tests }}\n{{- $mounts_gnocchi_tests_init := .Values.pod.mounts.gnocchi_tests.init_container }}\n\n{{- $serviceAccountName := print .Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{.Release.Name}}-test\"\n  labels:\n{{ tuple $envAll \"gnocchi\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\nspec:\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.gnocchi.enabled }}\n{{ tuple $envAll \"gnocchi\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  serviceAccountName: {{ $serviceAccountName }}\n  restartPolicy: Never\n  initContainers:\n{{ tuple $envAll \"tests\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: {{.Release.Name}}-helm-tests\n{{ tuple $envAll \"gnocchi_api\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n        - name: OS_AUTH_TYPE\n          valueFrom:\n            secretKeyRef:\n              name: {{ $.Values.secrets.identity.admin }}\n              key: OS_AUTH_TYPE\n        - name: OS_TENANT_NAME\n          valueFrom:\n            secretKeyRef:\n              name: {{ $.Values.secrets.identity.admin }}\n              key: OS_TENANT_NAME\n{{- end }}\n      command:\n        - /tmp/gnocchi-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: gnocchi-etc\n          mountPath: /etc/gnocchi/gnocchi.conf\n          subPath: gnocchi.conf\n          readOnly: true\n        - name: gnocchi-bin\n          mountPath: /tmp/gnocchi-test.sh\n          subPath: gnocchi-test.sh\n          readOnly: true\n{{ if $mounts_gnocchi_tests.volumeMounts }}{{ toYaml $mounts_gnocchi_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: gnocchi-etc\n      secret:\n        secretName: gnocchi-etc\n        defaultMode: 0444\n    - name: gnocchi-bin\n      configMap:\n        name: gnocchi-bin\n        defaultMode: 0555\n{{ if $mounts_gnocchi_tests.volumes }}{{ toYaml $mounts_gnocchi_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/secret-db-indexer.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db_indexer }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"gnocchi\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db_indexer $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  DB_CONNECTION_INDEXER: {{ tuple \"oslo_db_postgresql\" \"internal\" $userClass \"postgresql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"gnocchi\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  DB_CONNECTION: {{ tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nCopyright 2019 Wind River Systems, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"metric\" ) }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"gnocchi\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n{{- $auth := index $envAll.Values.endpoints.identity.auth $userClass }}\n{{ $osAuthType := $auth.os_auth_type }}\n{{ $osTenantName := $auth.os_tenant_name }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 }}\n  OS_AUTH_TYPE: {{ $osAuthType  | b64enc }}\n  OS_TENANT_NAME: {{ $osTenantName | b64enc }}\n{{ end }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"metric\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: gn-api\n      port: {{ tuple \"metric\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"gnocchi\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"metric\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/templates/service-statsd.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_statsd }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"metric_statsd\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: gn-stats\n      port: {{ tuple \"metric_statsd\" \"internal\" \"statsd\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.statsd.node_port.enabled }}\n      nodePort: {{ .Values.network.statsd.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"gnocchi\" \"statsd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.statsd.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "gnocchi/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for gnocchi.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  metricd:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  statsd:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nrelease_group: null\n\nimages:\n  tags:\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    gnocchi_storage_init: quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407\n    db_init_indexer: docker.io/library/postgres:9.5\n    # using non-kolla images until kolla supports postgres as\n    # an indexer\n    db_init: quay.io/attcomdev/ubuntu-source-gnocchi-api:3.0.3\n    db_sync: quay.io/attcomdev/ubuntu-source-gnocchi-api:3.0.3\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    gnocchi_api: quay.io/attcomdev/ubuntu-source-gnocchi-api:3.0.3\n    gnocchi_statsd: 
quay.io/attcomdev/ubuntu-source-gnocchi-statsd:3.0.3\n    gnocchi_metricd: quay.io/attcomdev/ubuntu-source-gnocchi-metricd:3.0.3\n    gnocchi_resources_cleaner: quay.io/attcomdev/ubuntu-source-gnocchi-base:3.0.3\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\njobs:\n  resources_cleaner:\n    # daily\n    cron: \"0 */24 * * *\"\n    deleted_resources_ttl: '1day'\n    history:\n      success: 3\n      failed: 1\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 8041\n  statsd:\n    node_port:\n      enabled: false\n      port: 8125\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - gnocchi-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - gnocchi-storage-init\n        - gnocchi-db-sync\n        - gnocchi-ks-endpoints\n        - gnocchi-ks-service\n        - gnocchi-ks-user\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_db\n    clean:\n      services: null\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init_postgresql:\n      jobs: null\n      services:\n        - endpoint: internal\n          service: oslo_db_postgresql\n    db_sync:\n      jobs:\n        - gnocchi-storage-init\n        - gnocchi-db-init\n        - gnocchi-db-init-indexer\n      services:\n        - endpoint: internal\n          service: oslo_db_postgresql\n    ks_endpoints:\n      jobs:\n        
- gnocchi-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    metricd:\n      jobs:\n        - gnocchi-storage-init\n        - gnocchi-db-sync\n        - gnocchi-ks-user\n        - gnocchi-ks-service\n        - gnocchi-ks-endpoints\n      services:\n        - endpoint: internal\n          service: oslo_db_postgresql\n        - endpoint: internal\n          service: metric\n    statsd:\n      jobs:\n        - gnocchi-storage-init\n        - gnocchi-db-sync\n        - gnocchi-ks-user\n        - gnocchi-ks-service\n        - gnocchi-ks-endpoints\n      services:\n        - endpoint: internal\n          service: oslo_db_postgresql\n        - endpoint: internal\n          service: metric\n    resources_cleaner:\n      jobs:\n        - gnocchi-storage-init\n        - gnocchi-db-sync\n        - gnocchi-ks-user\n        - gnocchi-ks-endpoints\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: metric\n    storage_init:\n      services: null\n    tests:\n      jobs:\n        - gnocchi-storage-init\n        - gnocchi-db-sync\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_db_postgresql\n        - endpoint: internal\n          service: metric\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\npod:\n  user:\n    gnocchi:\n      uid: 1000\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    gnocchi:\n      enabled: false\n 
     tolerations:\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule\n  mounts:\n    gnocchi_api:\n      init_container: null\n      gnocchi_api:\n    gnocchi_statsd:\n      init_container: null\n      gnocchi_statsd:\n    gnocchi_metricd:\n      init_container: null\n      gnocchi_metricd:\n    gnocchi_resources_cleaner:\n      init_container: null\n      gnocchi_resources_cleaner:\n    gnocchi_tests:\n      init_container: null\n      gnocchi_tests:\n  replicas:\n    api: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        metricd:\n          enabled: false\n          min_ready_seconds: 0\n          max_unavailable: 1\n        statsd:\n          enabled: false\n          min_ready_seconds: 0\n          max_unavailable: 1\n    disruption_budget:\n      api:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"124Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    statsd:\n      requests:\n        memory: \"124Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    metricd:\n      requests:\n        memory: \"124Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      clean:\n        requests:\n          memory: \"124Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"124Mi\"\n          cpu: \"100m\"\n        
limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"124Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"124Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"124Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"124Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      resources_cleaner:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"124Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nconf:\n  apache: |\n    Listen 0.0.0.0:{{ tuple \"metric\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n\n    SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n    CustomLog /dev/stdout combined env=!forwarded\n    CustomLog /dev/stdout proxy env=forwarded\n\n    <VirtualHost *:{{ tuple \"metric\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}>\n        WSGIDaemonProcess gnocchi processes=1 threads=2 user=gnocchi group=gnocchi display-name=%{GROUP}\n        WSGIProcessGroup gnocchi\n        WSGIScriptAlias / \"/var/lib/kolla/venv/lib/python2.7/site-packages/gnocchi/rest/app.wsgi\"\n        WSGIApplicationGroup %{GLOBAL}\n\n        ErrorLog /dev/stderr\n        SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n        CustomLog /dev/stdout combined env=!forwarded\n        CustomLog /dev/stdout proxy env=forwarded\n\n        <Directory \"/var/lib/kolla/venv/lib/python2.7/site-packages/gnocchi/rest\">\n              Require all granted\n        </Directory>\n    </VirtualHost>\n  ceph:\n    monitors: []\n    admin_keyring: null\n    override:\n    append:\n  enable_paste: True\n  paste:\n    pipeline:main:\n      pipeline: gnocchi+auth\n    composite:gnocchi+noauth:\n      use: egg:Paste#urlmap\n      /: gnocchiversions\n      /v1: gnocchiv1+noauth\n    composite:gnocchi+auth:\n      use: egg:Paste#urlmap\n      /: gnocchiversions\n      /v1: gnocchiv1+auth\n    pipeline:gnocchiv1+noauth:\n      pipeline: gnocchiv1\n    pipeline:gnocchiv1+auth:\n      pipeline: keystone_authtoken gnocchiv1\n    app:gnocchiversions:\n      paste.app_factory: gnocchi.rest.app:app_factory\n      root: gnocchi.rest.VersionsController\n    app:gnocchiv1:\n      paste.app_factory: gnocchi.rest.app:app_factory\n      root: gnocchi.rest.V1Controller\n    filter:keystone_authtoken:\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n      oslo_config_project: gnocchi\n  policy:\n    admin_or_creator: 'role:admin or project_id:%(created_by_project_id)s'\n    resource_owner: 'project_id:%(project_id)s'\n    metric_owner: 'project_id:%(resource.project_id)s'\n    get status: 'role:admin'\n    create resource: ''\n    get resource: 'rule:admin_or_creator or rule:resource_owner'\n    update resource: 'rule:admin_or_creator'\n    delete resource: 
'rule:admin_or_creator'\n    delete resources: 'rule:admin_or_creator'\n    list resource: 'rule:admin_or_creator or rule:resource_owner'\n    search resource: 'rule:admin_or_creator or rule:resource_owner'\n    create resource type: 'role:admin'\n    delete resource type: 'role:admin'\n    update resource type: 'role:admin'\n    list resource type: ''\n    get resource type: ''\n    get archive policy: ''\n    list archive policy: ''\n    create archive policy: 'role:admin'\n    update archive policy: 'role:admin'\n    delete archive policy: 'role:admin'\n    create archive policy rule: 'role:admin'\n    get archive policy rule: ''\n    list archive policy rule: ''\n    delete archive policy rule: 'role:admin'\n    create metric: ''\n    delete metric: 'rule:admin_or_creator'\n    get metric: 'rule:admin_or_creator or rule:metric_owner'\n    search metric: 'rule:admin_or_creator or rule:metric_owner'\n    list metric: ''\n    list all metric: 'role:admin'\n    get measures: 'rule:admin_or_creator or rule:metric_owner'\n    post measures: 'rule:admin_or_creator'\n  gnocchi:\n    DEFAULT:\n      debug: false\n    token:\n      provider: uuid\n    api:\n      auth_mode: keystone\n      # NOTE(portdirect): the bind port should not be defined, and is manipulated\n      # via the endpoints section.\n      port: null\n    statsd:\n      # NOTE(portdirect): the bind port should not be defined, and is manipulated\n      # via the endpoints section.\n      port: null\n    metricd:\n      workers: 1\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. 
when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    storage:\n      driver: ceph\n      ceph_pool: gnocchi.metrics\n      ceph_username: gnocchi\n      ceph_keyring: /etc/ceph/ceph.client.gnocchi.keyring\n      ceph_conffile: /etc/ceph/ceph.conf\n      file_basepath: /var/lib/gnocchi\n      provided_keyring: null\n    indexer:\n      driver: postgresql\n      # -- Indexer connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db (or oslo_db_postgresql). Set to null to\n      ## disable auto-generation.\n      url: \"\"\n    keystone_authtoken:\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n    oslo_concurrency:\n      lock_path: /var/lock\n\nceph_client:\n  configmap: ceph-etc\n  user_secret_name: pvc-ceph-client-key\n\nsecrets:\n  identity:\n    admin: gnocchi-keystone-admin\n    gnocchi: gnocchi-keystone-user\n  oslo_db:\n    admin: gnocchi-db-admin\n    gnocchi: gnocchi-db-user\n  oslo_db_indexer:\n    admin: gnocchi-db-indexer-admin\n    gnocchi: gnocchi-db-indexer-user\n  rbd: gnocchi-rbd-keyring\n  tls:\n    metric:\n      api:\n        public: gnocchi-tls-public\n\nbootstrap:\n  enabled: false\n  ks_user: gnocchi\n  script: |\n    openstack token issue\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  identity:\n    name: keystone\n    auth:\n      admin:\n        username: \"admin\"\n        user_domain_name: \"default\"\n        password: \"password\"\n        project_name: \"admin\"\n        
project_domain_name: \"default\"\n        region_name: \"RegionOne\"\n        os_auth_type: \"password\"\n        os_tenant_name: \"admin\"\n      gnocchi:\n        username: \"gnocchi\"\n        role: \"admin\"\n        password: \"password\"\n        project_name: \"service\"\n        region_name: \"RegionOne\"\n        os_auth_type: \"password\"\n        os_tenant_name: \"service\"\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 80\n        internal: 5000\n  metric:\n    name: gnocchi\n    hosts:\n      default: gnocchi-api\n      public: gnocchi\n    host_fqdn_override:\n      default: null\n      # NOTE: this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 8041\n        public: 80\n  metric_statsd:\n    name: gnocchi-statsd\n    hosts:\n      default: gnocchi-statsd\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: null\n    port:\n      statsd:\n        default: 8125\n  oslo_db_postgresql:\n    auth:\n      admin:\n        username: postgres\n        password: password\n      gnocchi:\n        username: gnocchi\n        password: password\n    hosts:\n      default: postgresql\n    host_fqdn_override:\n      default: null\n    path: /gnocchi\n    scheme: postgresql\n    port:\n      postgresql:\n        default: 5432\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n      gnocchi:\n        username: gnocchi\n        password: password\n    hosts:\n      default: mariadb\n 
   host_fqdn_override:\n      default: null\n    path: /gnocchi\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  cron_job_resources_cleaner: true\n  daemonset_metricd: true\n  daemonset_statsd: true\n  deployment_api: true\n  ingress_api: true\n  job_bootstrap: true\n  job_clean: true\n  job_db_drop: false\n  job_db_init_indexer: true\n  job_db_init: true\n  job_image_repo_sync: true\n  secret_db_indexer: true\n  job_db_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_storage_init: true\n  pdb_api: true\n  pod_gnocchi_test: true\n  secret_db: true\n  secret_keystone: true\n  secret_ingress_tls: true\n  service_api: true\n  service_ingress_api: true\n  service_statsd: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: 
client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "grafana/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v12.4.2\ndescription: OpenStack-Helm Grafana\nname: grafana\nversion: 2025.2.0\nhome: https://grafana.com/\nsources:\n  - https://github.com/grafana/grafana\n  - https://opendev.org/openstack/openstack-helm-addons\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "grafana/templates/bin/_db-session-sync.py.tpl",
    "content": "#!/usr/bin/env python\n\n# Creates db and user for an OpenStack Service:\n# Set ROOT_DB_CONNECTION and DB_CONNECTION environment variables to contain\n# SQLAlchemy strings for the root connection to the database and the one you\n# wish the service to use. Alternatively, you can use an ini formatted config\n# at the location specified by OPENSTACK_CONFIG_FILE, and extract the string\n# from the key OPENSTACK_CONFIG_DB_KEY, in the section specified by\n# OPENSTACK_CONFIG_DB_SECTION.\n\nimport os\nimport sys\nimport logging\nfrom sqlalchemy import create_engine, text\n\n# Create logger, console handler and formatter\nlogger = logging.getLogger('OpenStack-Helm DB Init')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(lineno)d - %(funcName)s - %(message)s')\n\n# Set the formatter and add the handler\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n# Get the connection string for the service db\nif \"DB_CONNECTION\" in os.environ:\n    user_db_conn = os.environ['DB_CONNECTION']\n    logger.info('Got config from DB_CONNECTION env var')\nelse:\n    logger.critical('Could not get db config, either from config file or env var')\n    sys.exit(1)\n\n# User DB engine\ntry:\n    user_engine = create_engine(user_db_conn)\n    # Get our user data out of the user_engine\n    database = user_engine.url.database\n    user = user_engine.url.username\n    password = user_engine.url.password\n    host = user_engine.url.host\n    port = user_engine.url.port\n    logger.info('Got user db config')\nexcept:\n    logger.critical('Could not get user database config')\n    raise\n\n# Test connection\ntry:\n    connection = user_engine.connect()\n    connection.close()\n    logger.info(\"Tested connection to DB @ {0}:{1}/{2} as {3}\".format(\n        host, port, database, user))\nexcept:\n    logger.critical('Could not connect to database as 
user')\n    raise\n\n# Create Table\ntry:\n    with user_engine.connect() as conn:\n        conn.execute(text('''CREATE TABLE IF NOT EXISTS `session` (\n                            `key` CHAR(16) NOT NULL,\n                            `data` BLOB,\n                            `expiry` INT(11) UNSIGNED NOT NULL,\n                            PRIMARY KEY (`key`)\n                            ) ENGINE=MyISAM DEFAULT CHARSET=utf8;'''))\n        try:\n            conn.commit()\n        except AttributeError:\n            pass\n    logger.info('Created table for session cache')\nexcept Exception as e:\n    logger.critical(f'Could not create table for session cache: {e}')\n    raise\n"
  },
  {
    "path": "grafana/templates/bin/_grafana.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -exo pipefail\nCOMMAND=\"${@:-start}\"\nPORT={{ tuple \"grafana\" \"internal\" \"grafana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\nPIDFILE=/tmp/pid\nDB_HOST={{ tuple \"oslo_db\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\nDB_PORT={{ tuple \"oslo_db\" \"direct\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\nMYSQL_PARAMS=\" \\\n  --defaults-file=/tmp/my.cnf \\\n  --host=${DB_HOST} \\\n  --port=${DB_PORT}\n{{- if .Values.manifests.certificates }}\n  --ssl-verify-server-cert=false \\\n  --ssl-ca=/etc/mysql/certs/ca.crt \\\n  --ssl-key=/etc/mysql/certs/tls.key \\\n  --ssl-cert=/etc/mysql/certs/tls.crt \\\n{{- end }}\n  \"\n\nfunction start () {\n  exec /usr/share/grafana/bin/grafana server -homepath=/usr/share/grafana -config=/etc/grafana/grafana.ini --pidfile=\"$PIDFILE\"\n}\n\nfunction run_migrator () {\n  BACKUP_FILE=$(mktemp)\n  LOG_FILE=$(mktemp)\n  STOP_FLAG=$(mktemp)\n  echo \"Making sure the database is reachable....\"\n  set +e\n  until mariadb ${MYSQL_PARAMS} grafana -e \"select 1;\"\n  do\n    echo \\\"Database ${DB_HOST} is not reachable. 
Sleeping for 10 seconds...\\\"\n    sleep 10\n  done\n  set -e\n  echo \"Preparing initial database backup...\"\n  mariadb-dump ${MYSQL_PARAMS} --add-drop-table --quote-names grafana > \"${BACKUP_FILE}\"\n  echo \"Backup SQL file ${BACKUP_FILE}\"\n  ls -lh \"${BACKUP_FILE}\"\n  {\n    # this is the background process that re-starts Grafana server\n    # in order to process grafana database migration\n    set +e\n    while true\n    do\n      start 2>&1 | tee \"$LOG_FILE\"\n      sleep 10\n      echo \"Restarting Grafana server...\"\n      stop\n      echo \"Emptying log file...\"\n      echo > \"$LOG_FILE\"\n      while [ -f ${STOP_FLAG} ]\n      do\n        echo \"Lock file still exists - ${STOP_FLAG}...\"\n        ls -la ${STOP_FLAG}\n        echo \"Waiting for lock file to get removed...\"\n        sleep 5\n      done\n      echo \"Lock file is removed, proceeding with grafana re-start..\"\n    done\n    set -e\n  } &\n  until cat \"${LOG_FILE}\" | grep -E \"migrations completed\"\n  do\n    echo \"The migrations are not completed yet...\"\n    if cat \"${LOG_FILE}\" | grep -E \"migration failed\"\n    then\n      echo \"Locking server restart by placing a flag file ${STOP_FLAG} ..\"\n      touch \"${STOP_FLAG}\"\n      echo \"Migration failure has been detected. Stopping Grafana server...\"\n      set +e\n      stop\n      set -e\n      echo \"Making sure the database is reachable....\"\n      set +e\n      until mariadb ${MYSQL_PARAMS} grafana -e \"select 1;\"\n      do\n        echo \\\"Database ${DB_HOST} is not reachable. 
Sleeping for 10 seconds...\\\"\n        sleep 10\n      done\n      set -e\n      echo \"Cleaning the database...\"\n      TABLES=$(\n        mariadb ${MYSQL_PARAMS} grafana -e \"show tables\\G;\" | grep Tables | cut -d \" \" -f 2\n      )\n      for TABLE in ${TABLES}\n      do\n        echo ${TABLE}\n        mariadb ${MYSQL_PARAMS} grafana -e \"drop table ${TABLE};\"\n      done\n      echo \"Restoring the database backup...\"\n      mariadb ${MYSQL_PARAMS} grafana < \"${BACKUP_FILE}\"\n      echo \"Removing lock file ${STOP_FLAG} ...\"\n      rm -f \"${STOP_FLAG}\"\n      echo \"${STOP_FLAG} has been removed\"\n    fi\n    sleep 10\n  done\n  stop\n  rm -f \"${BACKUP_FILE}\"\n}\n\nfunction stop () {\n  if [ -f \"$PIDFILE\" ]; then\n    echo -e \"Found pidfile, killing running Grafana server\"\n    kill -9 `cat $PIDFILE`\n    rm $PIDFILE\n  else\n    kill -TERM 1\n  fi\n}\n\n$COMMAND\n"
  },
  {
    "path": "grafana/templates/bin/_selenium-tests.py.tpl",
    "content": "#!/usr/bin/env python3\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nimport logging\nimport os\nimport sys\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.chrome.options import Options\n{{- if .Values.selenium_v4 }}\nfrom selenium.webdriver.chrome.service import Service\n{{- end }}\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\n\n# Create logger, console handler and formatter\nlogger = logging.getLogger('Grafana Selenium Tests')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\n    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n)\n\n# Set the formatter and add the handler\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\ndef get_variable(env_var):\n    if env_var in os.environ:\n        logger.info('Found \"{}\"'.format(env_var))\n        return os.environ[env_var]\n    else:\n        logger.critical('Variable \"{}\" is not defined!'.format(env_var))\n        sys.exit(1)\n\nusername = get_variable('GRAFANA_USER')\npassword = get_variable('GRAFANA_PASSWORD')\ngrafana_uri = get_variable('GRAFANA_URI')\n\nchrome_driver = '/etc/selenium/chromedriver'\noptions = 
Options()\noptions.add_argument('--headless=new')\noptions.add_argument('--no-sandbox')\noptions.add_argument('--window-size=1920x1080')\n\n{{- if .Values.selenium_v4 }}\nservice = Service(executable_path=chrome_driver)\nbrowser = webdriver.Chrome(service=service, options=options)\n{{- else }}\nbrowser = webdriver.Chrome(chrome_driver, chrome_options=options)\n{{- end }}\n\nlogin_url = grafana_uri.rstrip('/') + '/login'\nlogger.info(\"Attempting to open Grafana login page at {}\".format(login_url))\ntry:\n    browser.get(login_url)\n    WebDriverWait(browser, 30).until(\n        EC.presence_of_element_located((By.NAME, 'user'))\n    )\n    WebDriverWait(browser, 30).until(\n        EC.presence_of_element_located((By.NAME, 'password'))\n    )\n    logger.info('Grafana login form is ready')\nexcept TimeoutException:\n    logger.critical('Timed out waiting for Grafana login form')\n    browser.quit()\n    sys.exit(1)\n\nlogger.info(\"Attempting to log into Grafana dashboard\")\ntry:\n{{- if .Values.selenium_v4 }}\n    browser.find_element(By.NAME, 'user').send_keys(username)\n    browser.find_element(By.NAME, 'password').send_keys(password)\n    browser.find_element(By.CSS_SELECTOR, '[type=\"submit\"]').click()\n{{- else }}\n    browser.find_element_by_name('user').send_keys(username)\n    browser.find_element_by_name('password').send_keys(password)\n    browser.find_element_by_css_selector('[type=\"submit\"]').click()\n{{- end }}\n    logger.info(\"Successfully logged in to Grafana\")\nexcept NoSuchElementException:\n    logger.error(\"Failed to log in to Grafana\")\n    browser.quit()\n    sys.exit(1)\n\nbrowser.quit()\n"
  },
  {
    "path": "grafana/templates/bin/_set-admin-password.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\necho \"Attempting to update Grafana admin user password\"\ngrafana-cli --homepath \"/usr/share/grafana\" --config /etc/grafana/grafana.ini admin reset-admin-password ${GF_SECURITY_ADMIN_PASSWORD}\n\nif [ \"$?\" == 1 ]; then\n  echo \"The Grafana admin user does not exist yet, so no need to update password\"\n  exit 0;\nelse\n  exit 0;\nfi\n"
  },
  {
    "path": "grafana/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: grafana-bin\ndata:\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-session-sync.py: |\n{{ tuple \"bin/_db-session-sync.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n  grafana.sh: |\n{{ tuple \"bin/_grafana.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  selenium-tests.py: |\n{{ tuple \"bin/_selenium-tests.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  set-admin-password.sh: |\n{{ tuple \"bin/_set-admin-password.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/configmap-dashboards.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_dashboards }}\n{{ range $group, $dashboards := .Values.conf.dashboards }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: grafana-dashboards-{{$group}}\ndata:\n{{ range $key, $value := $dashboards }}\n  {{$key}}.json: {{ $value | toJson }}\n{{ end }}\n{{ end }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.grafana.database.url)) (empty .Values.conf.grafana.database.url) (not (eq .Values.conf.grafana.database.type \"sqlite3\") ) -}}\n\n{{- $url := tuple \"oslo_db\" \"internal\" \"user\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | replace \"mysql+pymysql://\" \"mysql://\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8\" $url ) | set .Values.conf.grafana.database \"url\" -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.endpoint_host_lookup\" | set .Values.conf.grafana.database \"server_cert_name\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.grafana.database \"url\" $url -}}\n{{- end -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: grafana-etc\ntype: Opaque\ndata:\n  dashboards.yaml: {{ toYaml .Values.conf.provisioning.dashboards | b64enc }}\n  grafana.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.grafana | b64enc }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.provisioning.datasources.template \"key\" \"datasources.yaml\" \"format\" \"Secret\") | indent 2 }}\n{{ if not (empty .Values.conf.ldap) }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.ldap.template \"key\" \"ldap.toml\" \"format\" \"Secret\") | indent 2 }}\n{{ end }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment }}\n{{- $envAll := . }}\n\n{{- $mounts_grafana := .Values.pod.mounts.grafana.grafana }}\n\n{{- $serviceAccountName := \"grafana\" }}\n{{ tuple $envAll \"grafana\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: grafana\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"grafana\" \"dashboard\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.grafana }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"grafana\" \"dashboard\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"grafana\" \"dashboard\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"grafana\" \"containerNames\" (list \"grafana\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"dashboard\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"grafana\" \"dashboard\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.grafana.node_selector_key }}: {{ .Values.labels.grafana.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"grafana\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n{{- if and .Values.conf.grafana.image_rendering_sidecar.enabled .Values.conf.grafana.image_rendering_sidecar.k8s_sidecar_feature_enabled }}\n        - name: grafana-image-renderer\n{{ tuple $envAll \"grafana_image_renderer\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          restartPolicy: Always\n          ports:\n            - containerPort: {{ tuple \"grafana\" \"image_rendering\" \"grafana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"grafana\" \"image_rendering\" \"grafana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 15\n            periodSeconds: 10\n{{- end }}\n      containers:\n{{- if and .Values.conf.grafana.image_rendering_sidecar.enabled (not .Values.conf.grafana.image_rendering_sidecar.k8s_sidecar_feature_enabled) }}\n        - name: grafana-image-renderer\n{{ tuple $envAll \"grafana_image_renderer\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          ports:\n            - containerPort: {{ tuple \"grafana\" \"image_rendering\" \"grafana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"grafana\" \"image_rendering\" \"grafana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 15\n            periodSeconds: 10\n{{- end }}\n        - name: grafana\n{{ tuple $envAll \"grafana\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.grafana | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"dashboard\" \"container\" \"grafana\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/grafana.sh\n            - start\n          ports:\n            - name: dashboard\n              containerPort: {{ tuple \"grafana\" \"internal\" \"grafana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            httpGet:\n              path: /login\n              port: {{ tuple \"grafana\" \"internal\" \"grafana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n          env:\n            - name: GF_SECURITY_ADMIN_USER\n              valueFrom:\n                secretKeyRef:\n                  name: grafana-admin-creds\n                  key: GRAFANA_ADMIN_USERNAME\n            - name: GF_SECURITY_ADMIN_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: grafana-admin-creds\n                  key: GRAFANA_ADMIN_PASSWORD\n            - name: PROMETHEUS_URL\n              value: {{ tuple \"monitoring\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\n{{- if .Values.manifests.certificates }}\n            - name: CACERT\n              valueFrom:\n                secretKeyRef:\n                  key: ca.crt\n                  name: prometheus-tls-api\n{{- end }}\n{{- if .Values.conf.grafana.image_rendering_sidecar.enabled }}\n            - name: GF_RENDERING_SERVER_URL\n              value: \"http://localhost:{{ tuple \"grafana\" \"image_rendering\" \"grafana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/render\"\n            - name: GF_RENDERING_CALLBACK_URL\n              value: \"http://localhost:{{ tuple \"grafana\" \"internal\" \"grafana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\"\n{{- end }}\n{{- if .Values.pod.env.grafana }}\n{{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.pod.env.grafana | indent 12 }}\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-grafana\n              mountPath: /etc/grafana\n            - name: pod-screenshots-grafana\n              mountPath: /var/lib/grafana/png\n            - name: pod-pdf-grafana\n              mountPath: /var/lib/grafana/pdf\n            - name: pod-dashboards-grafana\n              mountPath: /etc/grafana/dashboards\n            - name: pod-provisioning-grafana\n              mountPath: {{ .Values.conf.grafana.paths.provisioning }}\n            - name: pod-alerting-grafana\n              mountPath: {{ .Values.conf.grafana.paths.alerting }}\n            - name: pod-csv-grafana\n              mountPath: {{ .Values.conf.grafana.paths.csv }}\n            - name: grafana-bin\n              mountPath: /tmp/grafana.sh\n              subPath: grafana.sh\n              readOnly: true\n            - name: grafana-etc\n              mountPath: {{ .Values.conf.grafana.paths.provisioning }}/dashboards/dashboards.yaml\n              subPath: dashboards.yaml\n            - name: grafana-etc\n              mountPath: {{ .Values.conf.grafana.paths.provisioning }}/datasources/datasources.yaml\n              subPath: datasources.yaml\n            - name: grafana-etc\n              mountPath: /etc/grafana/grafana.ini\n              subPath: grafana.ini\n            - name: grafana-etc\n              mountPath: /etc/grafana/ldap.toml\n              subPath: ldap.toml\n            - name: data\n              mountPath: /var/lib/grafana/data\n            - name: unified-storage\n              mountPath: {{ .Values.conf.grafana.unified_storage.index_path }}\n            {{- range $group, $dashboards := .Values.conf.dashboards }}\n            {{- range $key, $value := $dashboards }}\n            - name: grafana-dashboards-{{$group}}\n              mountPath: /etc/grafana/dashboards/{{$key}}.json\n              subPath: {{$key}}.json\n            {{- end }}\n            {{- end }}\n\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_grafana.volumeMounts }}{{ toYaml $mounts_grafana.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-grafana\n          emptyDir: {}\n        - name: pod-screenshots-grafana\n          emptyDir: {}\n        - name: pod-dashboards-grafana\n          emptyDir: {}\n        - name: pod-provisioning-grafana\n          emptyDir: {}\n        - name: pod-alerting-grafana\n          emptyDir: {}\n        - name: pod-csv-grafana\n          emptyDir: {}\n        - name: pod-pdf-grafana\n          emptyDir: {}\n        - name: grafana-bin\n          configMap:\n            name: grafana-bin\n            defaultMode: 0555\n        - name: grafana-etc\n          secret:\n            secretName: grafana-etc\n            defaultMode: 0444\n        {{- range $group, $dashboards := .Values.conf.dashboards }}\n        - name: grafana-dashboards-{{$group}}\n          configMap:\n            name: grafana-dashboards-{{$group}}\n            defaultMode: 0555\n        {{- end }}\n        - name: data\n          emptyDir: {}\n        - name: unified-storage\n          emptyDir: {}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_grafana.volumes }}{{ toYaml $mounts_grafana.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "grafana/templates/ingress-grafana.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress .Values.network.grafana.ingress.public }}\n{{- $envAll := . -}}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"grafana\" \"backendServiceType\" \"grafana\" \"backendPort\" \"dashboard\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.grafana.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/job-db-init-session.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_init_session }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"grafana-db-init-session\" }}\n{{ tuple $envAll \"db_init_session\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: grafana-db-init-session\n  labels:\n{{ tuple $envAll \"grafana\" \"db-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"grafana\" \"db-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"grafana-db-init-session\" \"containerNames\" (list \"grafana-db-init-session\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"db_init\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"db_init_session\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: grafana-db-init-session\n{{ tuple $envAll \"db_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_init_session | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"db_init\" \"container\" \"grafana_db_init_session\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: ROOT_DB_CONNECTION\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.oslo_db_session.admin }}\n                  key: DB_CONNECTION\n            - name: DB_CONNECTION\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.oslo_db_session.user }}\n                  key: DB_CONNECTION\n{{- if $envAll.Values.manifests.certificates }}\n            - name: MARIADB_X509\n              value: \"REQUIRE X509\"\n{{- end }}\n          command:\n            - /tmp/db-init.py\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: grafana-bin\n              mountPath: /tmp/db-init.py\n              subPath: db-init.py\n              readOnly: true\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db_session.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: grafana-bin\n          configMap:\n            name: grafana-bin\n            defaultMode: 0555\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db_session.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"grafana-db-init\" }}\n{{ tuple $envAll \"db_init\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: grafana-db-init\n  labels:\n{{ tuple $envAll \"grafana\" \"db-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"grafana\" \"db-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"grafana-db-init\" \"containerNames\" (list \"grafana-db-init\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"db_init\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"db_init\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: grafana-db-init\n{{ tuple $envAll \"db_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_init | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"db_init\" \"container\" \"grafana_db_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: ROOT_DB_CONNECTION\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.oslo_db.admin }}\n                  key: DB_CONNECTION\n            - name: DB_CONNECTION\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.oslo_db.user }}\n                  key: DB_CONNECTION\n{{- if $envAll.Values.manifests.certificates }}\n            - name: MARIADB_X509\n              value: \"REQUIRE X509\"\n{{- end }}\n          command:\n            - /tmp/db-init.py\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: grafana-bin\n              mountPath: /tmp/db-init.py\n              subPath: db-init.py\n              readOnly: true\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: grafana-bin\n          configMap:\n            name: grafana-bin\n            defaultMode: 0555\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/job-db-session-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_session_sync }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"grafana-db-session-sync\" }}\n{{ tuple $envAll \"db_session_sync\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: grafana-db-session-sync\n  labels:\n{{ tuple $envAll \"grafana\" \"db-session-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"grafana\" \"db-session-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"grafana-db-session-sync\" \"containerNames\" (list \"grafana-db-session-sync\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"db_session_sync\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"db_session_sync\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: grafana-db-session-sync\n{{ tuple $envAll \"grafana_db_session_sync\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_session_sync | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"db_session_sync\" \"container\" \"grafana_db_session_sync\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: DB_CONNECTION\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.oslo_db_session.user }}\n                  key: DB_CONNECTION\n{{- if $envAll.Values.manifests.certificates }}\n            - name: MARIADB_X509\n              value: \"REQUIRE X509\"\n{{- end }}\n          command:\n            - /tmp/db-session-sync.py\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: grafana-bin\n              mountPath: /tmp/db-session-sync.py\n              subPath: db-session-sync.py\n              readOnly: true\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db_session.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: grafana-bin\n          configMap:\n            name: grafana-bin\n            defaultMode: 0555\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db_session.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"grafana\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/job-run-migrator.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_run_migrator }}\n{{- $envAll := . }}\n\n{{- $mounts_grafana := .Values.pod.mounts.grafana.grafana }}\n\n{{- $serviceAccountName := \"grafana-run-migrator\" }}\n{{ tuple $envAll \"run_migrator\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: prepare-grafana-migrator\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\ndata:\n  prepare-grafana-migrator.sh: |\n    #!/bin/bash\n    set -xe\n    cp -av /usr/share/grafana/* /usr/share/grafana-prepare/\n    exit 0\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: grafana-run-migrator\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"grafana\" \"run-migrator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"grafana\" \"run-migrator\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"grafana-run-migrator\" \"containerNames\" (list \"prepare-grafana-migrator\" \"grafana-run-migrator\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"run_migrator\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"run_migrator\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n        - name: prepare-grafana-migrator\n{{ tuple $envAll \"grafana\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"run_migrator\" \"container\" \"prepare_grafana_migrator\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/prepare-grafana-migrator.sh\n          resources: {}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: grafana-binary-image\n              mountPath: /usr/share/grafana-prepare\n            - name: prepare-grafana-migrator\n              mountPath: /tmp/prepare-grafana-migrator.sh\n              readOnly: true\n              subPath: prepare-grafana-migrator.sh\n      containers:\n        - name: grafana-run-migrator\n{{ tuple $envAll \"mariadb\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.run_migrator | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"run_migrator\" \"container\" \"grafana_run_migrator\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/grafana.sh\n            - run_migrator\n          ports:\n            - name: dashboard\n              containerPort: {{ tuple \"grafana\" \"internal\" \"grafana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          # readinessProbe:\n          #   httpGet:\n          #     path: /login\n          #     port: {{ tuple \"grafana\" \"internal\" \"grafana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          #   initialDelaySeconds: 30\n          #   timeoutSeconds: 30\n          env:\n            - name: GF_SECURITY_ADMIN_USER\n              valueFrom:\n                secretKeyRef:\n                  name: grafana-admin-creds\n                  key: GRAFANA_ADMIN_USERNAME\n            - name: GF_SECURITY_ADMIN_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: grafana-admin-creds\n                  key: GRAFANA_ADMIN_PASSWORD\n            - name: PROMETHEUS_URL\n              value: {{ tuple \"monitoring\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\n{{- if .Values.manifests.certificates }}\n            - name: CACERT\n              valueFrom:\n                secretKeyRef:\n                  key: ca.crt\n                  name: prometheus-tls-api\n{{- end }}\n{{- if .Values.pod.env.grafana }}\n{{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.pod.env.grafana | indent 12 }}\n{{- end }}\n{{- if .Values.pod.env.grafana_run_migrator }}\n{{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.pod.env.grafana_run_migrator | indent 12 }}\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-etc-grafana\n              mountPath: /etc/grafana\n            - name: pod-screenshots-grafana\n              mountPath: /var/lib/grafana/png\n            
- name: pod-pdf-grafana\n              mountPath: /var/lib/grafana/pdf\n            - name: pod-dashboards-grafana\n              mountPath: /etc/grafana/dashboards\n            - name: pod-provisioning-grafana\n              mountPath: {{ .Values.conf.grafana.paths.provisioning }}\n            - name: pod-alerting-grafana\n              mountPath: {{ .Values.conf.grafana.paths.alerting }}\n            - name: pod-csv-grafana\n              mountPath: {{ .Values.conf.grafana.paths.csv }}\n            - name: grafana-binary-image\n              mountPath: /usr/share/grafana\n            - name: grafana-bin\n              mountPath: /tmp/grafana.sh\n              subPath: grafana.sh\n              readOnly: true\n            - name: grafana-etc\n              mountPath: {{ .Values.conf.grafana.paths.provisioning }}/dashboards/dashboards.yaml\n              subPath: dashboards.yaml\n            - name: grafana-etc\n              mountPath: {{ .Values.conf.grafana.paths.provisioning }}/datasources/datasources.yaml\n              subPath: datasources.yaml\n            - name: grafana-etc\n              mountPath: /etc/grafana/grafana.ini\n              subPath: grafana.ini\n            - name: grafana-etc\n              mountPath: /etc/grafana/ldap.toml\n              subPath: ldap.toml\n            - name: grafana-db\n              mountPath: /tmp/my.cnf\n              subPath: my.cnf\n            - name: data\n              mountPath: /var/lib/grafana/data\n            - name: unified-storage\n              mountPath: {{ .Values.conf.grafana.unified_storage.index_path }}\n            {{- range $group, $dashboards := .Values.conf.dashboards }}\n            {{- range $key, $value := $dashboards }}\n            - name: grafana-dashboards-{{$group}}\n              mountPath: /etc/grafana/dashboards/{{$key}}.json\n              subPath: {{$key}}.json\n            {{- end }}\n            {{- end }}\n\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" 
$envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_grafana.volumeMounts }}{{ toYaml $mounts_grafana.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-grafana\n          emptyDir: {}\n        - name: pod-screenshots-grafana\n          emptyDir: {}\n        - name: pod-dashboards-grafana\n          emptyDir: {}\n        - name: pod-provisioning-grafana\n          emptyDir: {}\n        - name: pod-alerting-grafana\n          emptyDir: {}\n        - name: pod-csv-grafana\n          emptyDir: {}\n        - name: pod-pdf-grafana\n          emptyDir: {}\n        - name: grafana-binary-image\n          emptyDir: {}\n        - name: grafana-bin\n          configMap:\n            name: grafana-bin\n            defaultMode: 0555\n        - name: grafana-etc\n          secret:\n            secretName: grafana-etc\n            defaultMode: 0444\n        - name: grafana-db\n          secret:\n            secretName: grafana-db\n            defaultMode: 0444\n        {{- range $group, $dashboards := .Values.conf.dashboards }}\n        - name: grafana-dashboards-{{$group}}\n          configMap:\n            name: grafana-dashboards-{{$group}}\n            defaultMode: 0555\n        {{- end }}\n        - name: data\n          emptyDir: {}\n        - name: unified-storage\n          emptyDir: {}\n        - name: prepare-grafana-migrator\n          configMap:\n            defaultMode: 0555\n            name: prepare-grafana-migrator\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_grafana.volumes }}{{ toYaml $mounts_grafana.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/job-set-admin-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_set_admin_user }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"grafana-set-admin-user\" }}\n{{ tuple $envAll \"set_admin_user\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: grafana-set-admin-user\n  labels:\n{{ tuple $envAll \"grafana\" \"set-admin-user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"grafana\" \"set-admin-user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"grafana-set-admin-user\" \"containerNames\" (list \"grafana-set-admin-password\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"set_admin_user\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"set_admin_user\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: grafana-set-admin-password\n{{ tuple $envAll \"grafana\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.set_admin_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"set_admin_user\" \"container\" \"grafana_set_admin_password\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/set-admin-password.sh\n          env:\n            - name: GF_SECURITY_ADMIN_USER\n              valueFrom:\n                secretKeyRef:\n                  name: grafana-admin-creds\n                  key: GRAFANA_ADMIN_USERNAME\n            - name: GF_SECURITY_ADMIN_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: grafana-admin-creds\n                  key: GRAFANA_ADMIN_PASSWORD\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: grafana-etc\n              mountPath: /etc/grafana/grafana.ini\n              subPath: grafana.ini\n            - name: grafana-bin\n              
mountPath: /tmp/set-admin-password.sh\n              subPath: set-admin-password.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-etc-grafana\n          emptyDir: {}\n        - name: grafana-bin\n          configMap:\n            name: grafana-bin\n            defaultMode: 0555\n        - name: grafana-etc\n          secret:\n            secretName: grafana-etc\n            defaultMode: 0444\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"grafana\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "grafana/templates/pod-helm-tests.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.helm_tests }}\n{{- $dashboardCount := len .Values.conf.dashboards }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := print .Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{.Release.Name}}-test\"\n  labels:\n{{ tuple $envAll \"grafana\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"grafana-test\" \"containerNames\" (list \"init\" \"grafana-selenium-tests\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n{{ dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  restartPolicy: Never\n  initContainers:\n{{ tuple $envAll \"tests\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: grafana-selenium-tests\n{{ tuple $envAll \"selenium_tests\" | include 
\"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"helm_tests\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      command:\n        - /tmp/selenium-tests.py\n      env:\n        - name: GRAFANA_USER\n          valueFrom:\n            secretKeyRef:\n              name: grafana-admin-creds\n              key: GRAFANA_ADMIN_USERNAME\n        - name: GRAFANA_PASSWORD\n          valueFrom:\n            secretKeyRef:\n              name: grafana-admin-creds\n              key: GRAFANA_ADMIN_PASSWORD\n        - name: GRAFANA_URI\n          value: {{ tuple \"grafana\" \"internal\" \"grafana\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\n        - name: CHROME_CONFIG_HOME\n          value: /tmp/google-chrome\n        - name: XDG_CONFIG_HOME\n          value: /tmp/google-chrome\n        - name: XDG_CACHE_HOME\n          value: /tmp/google-chrome\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: grafana-bin\n          mountPath: /tmp/selenium-tests.py\n          subPath: selenium-tests.py\n          readOnly: true\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: grafana-bin\n      configMap:\n        name: grafana-bin\n        defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/secret-admin-creds.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_admin_creds }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: grafana-admin-creds\ntype: Opaque\ndata:\n  GRAFANA_ADMIN_PASSWORD: {{ .Values.endpoints.grafana.auth.admin.password | b64enc }}\n  GRAFANA_ADMIN_USERNAME: {{ .Values.endpoints.grafana.auth.admin.username | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/secret-db-session.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db_session }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"user\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db_session $userClass }}\n{{- $connection := tuple \"oslo_db_session\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"user\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: grafana-db\ntype: Opaque\ndata:\n  my.cnf: {{ tuple \"secrets/_my.cnf.tpl\" . | include \"helm-toolkit.utils.template\"  | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"grafana\" \"backendService\" \"grafana\" ) }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/secret-prom-creds.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_prom_creds }}\n{{- $envAll := . }}\n{{- $secretName := index $envAll.Values.secrets.prometheus.user }}\n\n{{- $prometheus_user := .Values.endpoints.monitoring.auth.user.username }}\n{{- $prometheus_password := .Values.endpoints.monitoring.auth.user.password }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  PROMETHEUS_USERNAME: {{ .Values.endpoints.monitoring.auth.user.username | b64enc }}\n  PROMETHEUS_PASSWORD: {{ .Values.endpoints.monitoring.auth.user.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/secrets/_my.cnf.tpl",
    "content": "{{/*\n    Licensed under the Apache License, Version 2.0 (the \"License\");\n    you may not use this file except in compliance with the License.\n    You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n    Unless required by applicable law or agreed to in writing, software\n    distributed under the License is distributed on an \"AS IS\" BASIS,\n    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n    See the License for the specific language governing permissions and\n    limitations under the License.\n    */}}\n\n    [client]\n    user = {{ .Values.endpoints.oslo_db.auth.admin.username }}\n    password = {{ .Values.endpoints.oslo_db.auth.admin.password }}\n"
  },
  {
    "path": "grafana/templates/service-ingress.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress .Values.network.grafana.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"grafana\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "grafana/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"grafana\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: dashboard\n    port: {{ tuple \"grafana\" \"internal\" \"grafana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.grafana.node_port.enabled }}\n    nodePort: {{ .Values.network.grafana.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"grafana\" \"dashboard\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.grafana.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "grafana/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for grafana\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nimages:\n  tags:\n    grafana: docker.io/grafana/grafana:12.4.2\n    mariadb: quay.io/airshipit/mariadb:latest-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    grafana_db_session_sync: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    selenium_tests: quay.io/airshipit/osh-selenium:latest-ubuntu_noble\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n    grafana_image_renderer: docker.io/grafana/grafana-image-renderer:5.7.3\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\n# Use selenium v4 syntax\nselenium_v4: true\n\nlabels:\n  grafana:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\npod:\n  env:\n    grafana: null\n    grafana_run_migrator:\n      GF_DEFAULT_FORCE_MIGRATION: false\n  security_context:\n    dashboard:\n      pod:\n        runAsUser: 472\n      container:\n        grafana:\n          allowPrivilegeEscalation: false\n          
readOnlyRootFilesystem: true\n    db_init:\n      pod:\n        runAsUser: 472\n      container:\n        grafana_db_init_session:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        grafana_db_init:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    db_session_sync:\n      pod:\n        runAsUser: 472\n      container:\n        grafana_db_session_sync:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    set_admin_user:\n      pod:\n        runAsUser: 472\n      container:\n        grafana_set_admin_password:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    run_migrator:\n      pod:\n        runAsUser: 472\n      container:\n        prepare_grafana_migrator:\n          runAsUser: 0\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        grafana_run_migrator:\n          runAsUser: 65534\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        grafana_set_admin_password:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    test:\n      pod:\n        runAsUser: 0\n      container:\n        helm_tests:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  mounts:\n    grafana:\n      init_container: null\n      grafana:\n  replicas:\n    grafana: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    termination_grace_period:\n      grafana:\n        timeout: 600\n  resources:\n    enabled: false\n    jobs:\n      image_repo_sync:\n  
      requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init_session:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      grafana_db_session_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      set_admin_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      run_migrator:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n    grafana:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      grafana:\n        username: grafana\n        password: password\n    hosts:\n      
default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  oslo_db:\n    namespace: null\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      user:\n        username: grafana\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /grafana\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_db_session:\n    namespace: null\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      user:\n        username: grafana_session\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /grafana_session\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  grafana:\n    name: grafana\n    namespace: null\n    auth:\n      admin:\n        username: admin\n        password: password\n    hosts:\n      default: grafana-dashboard\n      public: grafana\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      grafana:\n        default: 3000\n        public: 80\n        image_rendering: 8081\n  monitoring:\n    name: prometheus\n    namespace: null\n    auth:\n      user:\n        username: admin\n        password: changeme\n    hosts:\n      default: prom-metrics\n      public: prometheus\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        public: 80\n  ldap:\n    hosts:\n      default: ldap\n    auth:\n      admin:\n        bind_dn: \"cn=admin,dc=cluster,dc=local\"\n        password: password\n    host_fqdn_override:\n      default: null\n    path:\n      
default: \"ou=People,dc=cluster,dc=local\"\n    scheme:\n      default: ldap\n    port:\n      ldap:\n        default: 389\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - grafana-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init_session:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_session_sync:\n      jobs:\n        - grafana-db-init-session\n      services:\n        - endpoint: internal\n          service: oslo_db\n    grafana:\n      jobs:\n        - grafana-db-init\n        - grafana-db-session-sync\n        - grafana-set-admin-user\n        - grafana-run-migrator\n      services:\n        - endpoint: internal\n          service: oslo_db\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    set_admin_user:\n      jobs:\n        - grafana-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    run_migrator:\n      jobs:\n        - grafana-set-admin-user\n      services:\n        - endpoint: internal\n          service: oslo_db\n    tests:\n      services:\n        - endpoint: internal\n          service: grafana\n\nnetwork:\n  grafana:\n    node_port:\n      enabled: false\n      port: 30902\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n\nnetwork_policy:\n  grafana:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nsecrets:\n  oci_image_registry:\n    grafana: grafana-oci-image-registry-key\n  oslo_db:\n    admin: grafana-db-admin\n    user: grafana-db-user\n  oslo_db_session:\n    admin: grafana-session-db-admin\n    
user: grafana-session-db-user\n  tls:\n    grafana:\n      grafana:\n        public: grafana-tls-public\n  prometheus:\n    user: prometheus-user-creds\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  configmap_dashboards: true\n  deployment: true\n  ingress: true\n  helm_tests: true\n  job_db_init: true\n  job_db_init_session: true\n  job_db_session_sync: true\n  job_image_repo_sync: true\n  job_set_admin_user: true\n  job_run_migrator: true\n  network_policy: false\n  secret_db: true\n  secret_db_session: true\n  secret_admin_creds: true\n  secret_ingress_tls: true\n  secret_prom_creds: true\n  secret_registry: true\n  service: true\n  service_ingress: true\n\nconf:\n  ldap:\n    config:\n      base_dns:\n        search: \"dc=cluster,dc=local\"\n        group_search: \"ou=Groups,dc=cluster,dc=local\"\n      filters:\n        search: \"(uid=%s)\"\n        group_search: \"(&(objectclass=posixGroup)(memberUID=uid=%s,ou=People,dc=cluster,dc=local))\"\n    template: |\n      verbose_logging = false\n      [[servers]]\n      host = \"{{ tuple \"ldap\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\"\n      port = {{ tuple \"ldap\" \"internal\" \"ldap\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      use_ssl = false\n      start_tls = false\n      ssl_skip_verify = false\n      bind_dn = \"{{ .Values.endpoints.ldap.auth.admin.bind_dn }}\"\n      bind_password = '{{ .Values.endpoints.ldap.auth.admin.password }}'\n      search_filter = \"{{ .Values.conf.ldap.config.filters.search }}\"\n      search_base_dns = [\"{{ .Values.conf.ldap.config.base_dns.search }}\"]\n      group_search_filter = \"{{ .Values.conf.ldap.config.filters.group_search }}\"\n      group_search_base_dns = [\"{{ .Values.conf.ldap.config.base_dns.group_search }}\"]\n      [servers.attributes]\n      username = \"uid\"\n      surname = \"sn\"\n      member_of = \"cn\"\n      email = \"mail\"\n      [[servers.group_mappings]]\n      group_dn = \"{{.Values.endpoints.ldap.auth.admin.bind_dn }}\"\n      org_role = \"Admin\"\n      [[servers.group_mappings]]\n      group_dn = \"*\"\n      org_role = \"Viewer\"\n  provisioning:\n    dashboards:\n      apiVersion: 1\n      providers:\n      - name: 'osh-infra-dashboards'\n        orgId: 1\n        folder: ''\n        type: file\n        disableDeletion: false\n        editable: false\n        options:\n          path: /etc/grafana/dashboards\n    datasources:\n      template: |\n        apiVersion: 1\n        datasources:\n        - name: prometheus\n          type: prometheus\n          access: proxy\n          orgId: 1\n          editable: true\n          basicAuth: true\n          basicAuthUser: {{ .Values.endpoints.monitoring.auth.user.username }}\n          secureJsonData:\n            basicAuthPassword: {{ .Values.endpoints.monitoring.auth.user.password }}\n          url: {{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\n  grafana:\n    alerting:\n      enabled: false\n    unified_alerting:\n      enabled: true\n    image_rendering_sidecar:\n      enabled: false\n      # https://kubernetes.io/docs/concepts/workloads/pods/sidecar-containers/\n      k8s_sidecar_feature_enabled: true\n    analytics:\n      reporting_enabled: false\n      check_for_updates: false\n    remote_cache:\n      type: database\n    auth:\n      login_cookie_name: grafana_sess\n      login_maximum_lifetime_duration: 24h\n    auth.ldap:\n      enabled: true\n      config_file: /etc/grafana/ldap.toml\n    paths:\n      data: /var/lib/grafana/data\n      plugins: /var/lib/grafana/plugins\n      alerting: /var/lib/grafana/alerting\n      csv: /var/lib/grafana/csv\n      provisioning: /etc/grafana/provisioning\n    unified_storage:\n      index_path: /var/lib/grafana/unified-search/bleve\n    server:\n      protocol: http\n      http_port: 3000\n    database:\n      type: mysql\n      # -- Database connection URL. When empty the URL is auto-generated\n      ## from endpoints.oslo_db. 
Set to null to disable auto-generation.\n      url: \"\"\n    security:\n      admin_user: ${GF_SECURITY_ADMIN_USER}\n      admin_password: ${GF_SECURITY_ADMIN_PASSWORD}\n      cookie_secure: false\n      cookie_username: grafana_user\n      cookie_remember_name: grafana_remember\n      login_remember_days: 7\n    dashboards:\n      default_home_dashboard_path: /etc/grafana/dashboards/home_dashboard.json\n    users:\n      allow_sign_up: false\n      allow_org_create: false\n      auto_assign_org: true\n      default_theme: dark\n    log:\n      mode: console\n      level: info\n    grafana_net:\n      url: https://grafana.net\n  dashboards: {}\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "heat/.helmignore",
    "content": "values_overrides\n"
  },
  {
    "path": "heat/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Heat\nname: heat\nversion: 2025.2.0\nhome: https://docs.openstack.org/heat/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Heat/OpenStack_Project_Heat_vertical.png\nsources:\n  - https://opendev.org/openstack/heat\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "heat/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "heat/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nheat-manage db_sync\n"
  },
  {
    "path": "heat/templates/bin/_heat-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n\n{{- if .Values.manifests.certificates }}\n  if [ -f /etc/apache2/envvars ]; then\n    # Loading Apache2 ENV variables\n    source /etc/apache2/envvars\n    mkdir -p ${APACHE_RUN_DIR}\n  fi\n\n{{- if .Values.conf.software.apache2.a2enmod }}\n  {{- range .Values.conf.software.apache2.a2enmod }}\n  a2enmod {{ . }}\n  {{- end }}\n{{- end }}\n\n{{- if .Values.conf.software.apache2.a2dismod }}\n  {{- range .Values.conf.software.apache2.a2dismod }}\n  a2dismod {{ . }}\n  {{- end }}\n{{- end }}\n\n  if [ -f /var/run/apache2/apache2.pid ]; then\n    # Remove the stale pid for debian/ubuntu images\n    rm -f /var/run/apache2/apache2.pid\n  fi\n  # Starts Apache2\n  exec {{ .Values.conf.software.apache2.binary }} {{ .Values.conf.software.apache2.start_parameters }}\n{{- else }}\n  exec uwsgi --ini /etc/heat/heat-api-uwsgi.ini\n{{- end }}\n}\n\nfunction stop () {\n{{- if .Values.manifests.certificates }}\n  {{ .Values.conf.software.apache2.binary }} -k graceful-stop\n{{- else }}\n  kill -TERM 1\n{{- end }}\n}\n\n$COMMAND\n"
  },
  {
    "path": "heat/templates/bin/_heat-cfn.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n{{- if .Values.manifests.certificates }}\n  if [ -f /etc/apache2/envvars ]; then\n    # Loading Apache2 ENV variables\n    source /etc/apache2/envvars\n    mkdir -p ${APACHE_RUN_DIR}\n  fi\n\n{{- if .Values.conf.software.apache2.a2enmod }}\n  {{- range .Values.conf.software.apache2.a2enmod }}\n  a2enmod {{ . }}\n  {{- end }}\n{{- end }}\n\n{{- if .Values.conf.software.apache2.a2dismod }}\n  {{- range .Values.conf.software.apache2.a2dismod }}\n  a2dismod {{ . }}\n  {{- end }}\n{{- end }}\n\n  if [ -f /var/run/apache2/apache2.pid ]; then\n    # Remove the stale pid for debian/ubuntu images\n    rm -f /var/run/apache2/apache2.pid\n  fi\n  # Starts Apache2\n  exec {{ .Values.conf.software.apache2.binary }} {{ .Values.conf.software.apache2.start_parameters }}\n{{- else }}\n  exec uwsgi --ini /etc/heat/heat-api-cfn-uwsgi.ini\n{{- end }}\n}\n\nfunction stop () {\n{{- if .Values.manifests.certificates }}\n  {{ .Values.conf.software.apache2.binary }} -k graceful-stop\n{{- else }}\n  kill -TERM 1\n{{- end }}\n}\n\n$COMMAND\n"
  },
  {
    "path": "heat/templates/bin/_heat-engine-cleaner.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nheat-manage service clean\n"
  },
  {
    "path": "heat/templates/bin/_heat-engine.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n    exec heat-engine \\\n          --config-file /etc/heat/heat.conf \\\n          --config-dir /etc/heat/heat.conf.d\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "heat/templates/bin/_heat-purge-deleted-active.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nheat-manage purge_deleted -g minutes \"$1\"\n"
  },
  {
    "path": "heat/templates/bin/_trusts.sh.tpl",
    "content": "#!/bin/bash\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\n\n# Get IDs for filtering\nOS_PROJECT_ID=$(openstack project show -f value -c id ${OS_PROJECT_NAME})\nOS_USER_ID=$(openstack user show -f value -c id ${OS_USERNAME})\nSERVICE_OS_TRUSTEE_ID=$(openstack user show -f value -c id --domain ${SERVICE_OS_TRUSTEE_DOMAIN} ${SERVICE_OS_TRUSTEE})\n\n# Check if trust doesn't already exist\nopenstack trust list -f value -c \"Project ID\" \\\n          -c \"Trustee User ID\" -c \"Trustor User ID\" | \\\n          grep \"^${OS_PROJECT_ID} ${SERVICE_OS_TRUSTEE_ID} ${OS_USER_ID}$\" && \\\n          exit 0\n\n# If there are no roles specified...\nif [ -z \"${SERVICE_OS_ROLES}\" ]; then\n    # ...Heat will try to delegate all of the roles that user has in the\n    # project. Let's fetch them all and use that.\n    readarray -t roles < <(openstack role assignment list -f value \\\n            -c \"Role\" --user=\"${OS_USERNAME}\" --project=\"${OS_PROJECT_ID}\")\nelse\n    # Split roles into an array\n    IFS=',' read -r -a roles <<< \"${SERVICE_OS_ROLES}\"\nfi\n\n# Create trust between trustor and trustee\nSERVICE_OS_TRUST_ID=$(openstack trust create -f value -c id \\\n          --project=\"${OS_PROJECT_NAME}\" \\\n          ${roles[@]/#/--role=} \\\n          --trustee-domain=\"${SERVICE_OS_TRUSTEE_DOMAIN}\" \\\n          \"${OS_USERNAME}\" \\\n          \"${SERVICE_OS_TRUSTEE}\")\n\n# Display trust\nopenstack trust show \"${SERVICE_OS_TRUST_ID}\"\n"
  },
  {
    "path": "heat/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.certificates -}}\n{{  dict \"envAll\" . \"service\" \"orchestration\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{  dict \"envAll\" . \"service\" \"cloudformation\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "heat/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $rallyTests := .Values.conf.rally_tests }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: heat-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  rally-test.sh: |\n{{ tuple $rallyTests | include \"helm-toolkit.scripts.rally_test\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  ks-domain-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_domain_user\" . | indent 4 }}\n  trusts.sh: |\n{{ tuple \"bin/_trusts.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  heat-api.sh: |\n{{ tuple \"bin/_heat-api.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  heat-cfn.sh: |\n{{ tuple \"bin/_heat-cfn.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  heat-engine.sh: |\n{{ tuple \"bin/_heat-engine.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  heat-engine-cleaner.sh: |\n{{ tuple \"bin/_heat-engine-cleaner.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  heat-purge-deleted-active.sh: |\n{{ tuple \"bin/_heat-purge-deleted-active.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.heat.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.heat.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.heat.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.heat.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.heat.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.heat.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.heat.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.heat.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.heat.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.heat.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.heat.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.heat.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.heat.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.heat.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.heat.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.heat.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.heat.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.heat.keystone_authtoken \"username\" .Values.endpoints.identity.auth.heat.username -}}\n{{- end -}}\n{{- if empty .Values.conf.heat.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.heat.keystone_authtoken \"password\" .Values.endpoints.identity.auth.heat.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.heat.trustee.region_name -}}\n{{- $_ := set .Values.conf.heat.trustee \"region_name\" .Values.endpoints.identity.auth.heat_trustee.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.heat.trustee.user_domain_name -}}\n{{- $_ := set .Values.conf.heat.trustee \"user_domain_name\" .Values.endpoints.identity.auth.heat_trustee.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.heat.trustee.username -}}\n{{- $_ := set .Values.conf.heat.trustee \"username\" .Values.endpoints.identity.auth.heat_trustee.username -}}\n{{- end -}}\n{{- if empty .Values.conf.heat.trustee.password -}}\n{{- $_ 
:= set .Values.conf.heat.trustee \"password\" .Values.endpoints.identity.auth.heat_trustee.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.heat.DEFAULT.stack_user_domain_name -}}\n{{- $_ := set .Values.conf.heat.DEFAULT \"stack_user_domain_name\" .Values.endpoints.identity.auth.heat_stack_user.domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.heat.DEFAULT.stack_domain_admin -}}\n{{- $_ := set .Values.conf.heat.DEFAULT \"stack_domain_admin\" .Values.endpoints.identity.auth.heat_stack_user.username -}}\n{{- end -}}\n{{- if empty .Values.conf.heat.DEFAULT.stack_domain_admin_password -}}\n{{- $_ := set .Values.conf.heat.DEFAULT \"stack_domain_admin_password\" .Values.endpoints.identity.auth.heat_stack_user.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.heat.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.heat.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.heat.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.heat.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.heat.database.connection)) (empty .Values.conf.heat.database.connection) -}}\n{{- $connection := tuple \"oslo_db\" \"internal\" \"heat\" \"mysql\" . 
| include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.heat.database \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.heat.database \"connection\" $connection -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.heat.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"heat\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.heat.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.heat.DEFAULT.heat_metadata_server_url -}}\n{{- $_ := tuple \"cloudformation\" \"public\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | trimSuffix .Values.endpoints.cloudformation.path.default | set .Values.conf.heat.DEFAULT \"heat_metadata_server_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.heat.DEFAULT.heat_waitcondition_server_url -}}\n{{- $_ := cat (tuple \"cloudformation\" \"public\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\") \"waitcondition\" | replace \" \" \"/\" | set .Values.conf.heat.DEFAULT \"heat_waitcondition_server_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.heat.clients_keystone.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | trimSuffix .Values.endpoints.identity.path.default | set .Values.conf.heat.clients_keystone \"auth_uri\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.heat.trustee.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | trimSuffix .Values.endpoints.identity.path.default | set .Values.conf.heat.trustee \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.heat.heat_api.bind_port -}}\n{{- $_ := tuple \"orchestration\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.heat.heat_api \"bind_port\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.heat.heat_api_cfn.bind_port -}}\n{{- $_ := tuple \"cloudformation\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.heat.heat_api_cfn \"bind_port\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.heat_api_uwsgi.uwsgi.processes -}}\n{{- $_ := set .Values.conf.heat_api_uwsgi.uwsgi \"processes\" .Values.conf.heat.heat_api.workers -}}\n{{- end -}}\n{{- if empty (index .Values.conf.heat_api_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"orchestration\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.heat_api_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n\n{{- if empty .Values.conf.heat_api_cfn_uwsgi.uwsgi.processes -}}\n{{- $_ := set .Values.conf.heat_api_cfn_uwsgi.uwsgi \"processes\" .Values.conf.heat.heat_api_cfn.workers -}}\n{{- end -}}\n{{- if empty (index .Values.conf.heat_api_cfn_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"cloudformation\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.heat_api_cfn_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .deployment_name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: heat-etc\ntype: Opaque\ndata:\n  rally_tests.yaml: {{ toYaml .Values.conf.rally_tests.tests | b64enc }}\n  heat.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.heat | b64enc }}\n  heat-api-uwsgi.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.heat_api_uwsgi | b64enc }}\n  heat-api-cfn-uwsgi.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.heat_api_cfn_uwsgi | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.paste | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n{{- if 
.Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.mpm_event \"key\" \"mpm_event.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.wsgi_heat \"key\" \"wsgi-heat.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.wsgi_cfn \"key\" \"wsgi-cnf.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end }}\n  api_audit_map.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.api_audit_map | b64enc }}\n{{- range $key, $value := $envAll.Values.conf.rally_tests.templates }}\n  {{ printf \"test_template_%d\" $key }}: {{ $value.template | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/cron-job-engine-cleaner.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_job_engine_cleaner }}\n{{- $envAll := . }}\n\n{{- $mounts_heat_engine_cleaner := .Values.pod.mounts.heat_engine_cleaner.heat_engine_cleaner }}\n{{- $mounts_heat_engine_cleaner_init := .Values.pod.mounts.heat_engine_cleaner.init_container }}\n\n{{- $serviceAccountName := \"heat-engine-cleaner\" }}\n{{ tuple $envAll \"engine_cleaner\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.heat_engine_cleaner }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: heat-engine-cleaner\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  schedule: {{ .Values.jobs.engine_cleaner.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.engine_cleaner.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.engine_cleaner.history.failed }}\n  {{- if .Values.jobs.engine_cleaner.starting_deadline }}\n  startingDeadlineSeconds: {{ .Values.jobs.engine_cleaner.starting_deadline }}\n  {{- end }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"heat\" \"engine-cleaner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"heat-engine-cleaner\" \"containerNames\" (list \"heat-engine-cleaner\" \"init\" ) | include 
\"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"heat\" \"engine-cleaner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n          annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 12 }}\n            configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n            configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"heat-engine-cleaner\" \"containerNames\" (list \"heat-engine-cleaner\" \"init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 12 }}\n        spec:\n{{ tuple \"heat_engine_cleaner\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 10 }}\n{{ tuple \"heat_engine_cleaner\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 10 }}\n          serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"engine_cleaner\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n          restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.heat.enabled }}\n{{ tuple $envAll \"heat\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n          initContainers:\n{{ tuple $envAll \"engine_cleaner\" $mounts_heat_engine_cleaner_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          containers:\n            - name: heat-engine-cleaner\n{{ tuple $envAll \"heat_engine_cleaner\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll 
$envAll.Values.pod.resources.jobs.engine_cleaner | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"engine_cleaner\" \"container\" \"heat_engine_cleaner\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n              env:\n                - name: REQUESTS_CA_BUNDLE\n                  value: \"/etc/heat/certs/ca.crt\"\n{{- end }}\n              command:\n                - /tmp/heat-engine-cleaner.sh\n              volumeMounts:\n              - name: pod-tmp\n                mountPath: /tmp\n              - name: heat-bin\n                mountPath: /tmp/heat-engine-cleaner.sh\n                subPath: heat-engine-cleaner.sh\n                readOnly: true\n              - name: etcheat\n                mountPath: /etc/heat\n              - name: heat-etc\n                mountPath: /etc/heat/heat.conf\n                subPath: heat.conf\n                readOnly: true\n              - name: heat-etc-snippets\n                mountPath: /etc/heat/heat.conf.d/\n                readOnly: true\n              {{ if .Values.conf.heat.DEFAULT.log_config_append }}\n              - name: heat-etc\n                mountPath: {{ .Values.conf.heat.DEFAULT.log_config_append }}\n                subPath: {{ base .Values.conf.heat.DEFAULT.log_config_append }}\n                readOnly: true\n              {{ end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.orchestration.api.internal \"path\" \"/etc/heat/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 14 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 14 }}\n{{ if 
$mounts_heat_engine_cleaner.volumeMounts }}{{ toYaml $mounts_heat_engine_cleaner.volumeMounts | indent 14 }}{{ end }}\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: etcheat\n              emptyDir: {}\n            - name: heat-etc\n              secret:\n                secretName: heat-etc\n                defaultMode: 0444\n            - name: heat-etc-snippets\n{{- if $etcSources }}\n              projected:\n                sources:\n{{ toYaml $etcSources | indent 18 }}\n{{- else }}\n              emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.orchestration.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n            - name: heat-bin\n              configMap:\n                name: heat-bin\n                defaultMode: 0555\n{{ if $mounts_heat_engine_cleaner.volumes }}{{ toYaml $mounts_heat_engine_cleaner.volumes | indent 12 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/cron-job-purge-deleted.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_job_purge_deleted }}\n{{- $envAll := . }}\n\n{{- $mounts_heat_purge_deleted := .Values.pod.mounts.heat_purge_deleted.heat_purge_deleted }}\n{{- $mounts_heat_purge_deleted_init := .Values.pod.mounts.heat_purge_deleted.init_container }}\n\n{{- $serviceAccountName := \"heat-purge-deleted\" }}\n{{ tuple $envAll \"purge_deleted\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.heat_purge_deleted }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: heat-purge-deleted\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  schedule: {{ .Values.jobs.purge_deleted.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.purge_deleted.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.purge_deleted.history.failed }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"heat\" \"purge-deleted\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"heat-purge-deleted\" \"containerNames\" (list \"init\" \"heat-purge-deleted\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll 
\"heat\" \"purge-deleted\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n          annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 12 }}\n{{ dict \"envAll\" $envAll \"podName\" \"heat-purge-deleted\" \"containerNames\" (list \"init\" \"heat-purge-deleted\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 12 }}\n        spec:\n{{ tuple \"heat_purge_deleted\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 10 }}\n{{ tuple \"heat_purge_deleted\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 10 }}\n          serviceAccountName: {{ $serviceAccountName }}\n          restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.heat.enabled }}\n{{ tuple $envAll \"heat\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n          initContainers:\n{{ tuple $envAll \"purge_deleted\" $mounts_heat_purge_deleted_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          containers:\n            - name: heat-purge-deleted\n{{ tuple $envAll \"heat_purge_deleted\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.purge_deleted | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n              env:\n                - name: REQUESTS_CA_BUNDLE\n                  value: \"/etc/heat/certs/ca.crt\"\n{{- end }}\n              command:\n                - /tmp/heat-purge-deleted-active.sh\n                - {{ quote .Values.jobs.purge_deleted.purge_age }}\n              volumeMounts:\n              - name: pod-tmp\n                mountPath: /tmp\n              - 
name: heat-bin\n                mountPath: /tmp/heat-purge-deleted-active.sh\n                subPath: heat-purge-deleted-active.sh\n                readOnly: true\n              - name: etcheat\n                mountPath: /etc/heat\n              - name: heat-etc\n                mountPath: /etc/heat/heat.conf\n                subPath: heat.conf\n                readOnly: true\n              - name: heat-etc-snippets\n                mountPath: /etc/heat/heat.conf.d/\n                readOnly: true\n              {{ if .Values.conf.heat.DEFAULT.log_config_append }}\n              - name: heat-etc\n                mountPath: {{ .Values.conf.heat.DEFAULT.log_config_append }}\n                subPath: {{ base .Values.conf.heat.DEFAULT.log_config_append }}\n                readOnly: true\n              {{ end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.orchestration.api.internal \"path\" \"/etc/heat/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 14 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 14 }}\n{{ if $mounts_heat_purge_deleted.volumeMounts }}{{ toYaml $mounts_heat_purge_deleted.volumeMounts | indent 14 }}{{ end }}\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: etcheat\n              emptyDir: {}\n            - name: heat-etc\n              secret:\n                secretName: heat-etc\n                defaultMode: 0444\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.orchestration.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | 
include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n            - name: heat-bin\n              configMap:\n                name: heat-bin\n                defaultMode: 0555\n            - name: heat-etc-snippets\n{{- if $etcSources }}\n              projected:\n                sources:\n{{ toYaml $etcSources | indent 18 }}\n{{- else }}\n              emptyDir: {}\n{{ end }}\n{{ if $mounts_heat_purge_deleted.volumes }}{{ toYaml $mounts_heat_purge_deleted.volumes | indent 12 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . }}\n\n{{- $mounts_heat_api := .Values.pod.mounts.heat_api.heat_api }}\n{{- $mounts_heat_api_init := .Values.pod.mounts.heat_api.init_container }}\n\n{{- $serviceAccountName := \"heat-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.heat_api }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: heat-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"heat\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"heat\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"heat\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"heat_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"heat-api\" \"containerNames\" (list \"heat-api\" \"init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"heat_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"heat_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"heat\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"heat\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ if $envAll.Values.pod.tolerations.heat.enabled }}\n{{ tuple $envAll \"heat\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_heat_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: heat-api\n{{ tuple $envAll \"heat_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"heat\" \"container\" \"heat_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n          env:\n            - name: REQUESTS_CA_BUNDLE\n             
 value: \"/etc/heat/certs/ca.crt\"\n{{- end }}\n          command:\n            - /tmp/heat-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/heat-api.sh\n                  - stop\n          ports:\n            - name: h-api\n              containerPort: {{ tuple \"orchestration\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            httpGet:\n              scheme: {{ tuple \"orchestration\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n              path: /\n              port: {{ tuple \"orchestration\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 30\n          livenessProbe:\n            httpGet:\n              scheme: {{ tuple \"orchestration\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n              path: /\n              {{- if .Values.pod.probes.api.heat_api.liveness.port }}\n              port: {{ .Values.pod.probes.api.heat_api.liveness.port }}\n              {{- else }}\n              port: {{ tuple \"orchestration\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              {{- end }}\n            initialDelaySeconds: 10\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.heat.oslo_concurrency.lock_path }}\n            - name: pod-etc-heat\n              mountPath: /etc/heat\n            - name: heat-bin\n              mountPath: /tmp/heat-api.sh\n              subPath: heat-api.sh\n              readOnly: true\n            - name: heat-etc\n              mountPath: /etc/heat/heat.conf\n              subPath: heat.conf\n              readOnly: true\n            - name: heat-etc-snippets\n              mountPath: /etc/heat/heat.conf.d/\n              readOnly: true\n            - name: heat-etc\n              mountPath: /etc/heat/heat-api-uwsgi.ini\n              subPath: heat-api-uwsgi.ini\n              readOnly: true\n            {{ if .Values.conf.heat.DEFAULT.log_config_append }}\n            - name: heat-etc\n              mountPath: {{ .Values.conf.heat.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.heat.DEFAULT.log_config_append }}\n              readOnly: true\n            {{ end }}\n            - name: heat-etc\n              mountPath: /etc/heat/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: heat-etc\n              mountPath: /etc/heat/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: heat-etc\n              mountPath: /etc/heat/api_audit_map.conf\n              subPath: api_audit_map.conf\n              readOnly: true\n            {{- if .Values.manifests.certificates }}\n            - name: heat-etc\n              mountPath: {{ .Values.conf.software.apache2.site_dir }}/heat-api.conf\n              subPath: wsgi-heat.conf\n              readOnly: true\n            - name: heat-etc\n              mountPath: {{ 
.Values.conf.software.apache2.mods_dir }}/mpm_event.conf\n              subPath: mpm_event.conf\n              readOnly: true\n            {{- end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.orchestration.api.internal \"path\" \"/etc/heat/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_heat_api.volumeMounts }}{{ toYaml $mounts_heat_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-heat\n          emptyDir: {}\n        - name: heat-bin\n          configMap:\n            name: heat-bin\n            defaultMode: 0555\n        - name: heat-etc\n          secret:\n            secretName: heat-etc\n            defaultMode: 0444\n        - name: heat-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.orchestration.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_heat_api.volumes }}{{ toYaml $mounts_heat_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/deployment-cfn.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_cfn }}\n{{- $envAll := . }}\n\n{{- $mounts_heat_cfn := .Values.pod.mounts.heat_cfn.heat_cfn }}\n{{- $mounts_heat_cfn_init := .Values.pod.mounts.heat_cfn.init_container }}\n\n{{- $serviceAccountName := \"heat-cfn\" }}\n{{ tuple $envAll \"cfn\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.heat_cfn }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: heat-cfn\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"heat\" \"cfn\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.cfn }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"heat\" \"cfn\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"heat\" \"cfn\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"heat_cfn\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"heat-cfn\" \"containerNames\" (list \"heat-cfn\" \"init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"heat_cfn\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"heat_cfn\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"heat\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"heat\" \"cfn\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ if $envAll.Values.pod.tolerations.heat.enabled }}\n{{ tuple $envAll \"heat\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.cfn.node_selector_key }}: {{ .Values.labels.cfn.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.cfn.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"cfn\" $mounts_heat_cfn_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: heat-cfn\n{{ tuple $envAll \"heat_cfn\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.cfn | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"heat\" \"container\" \"heat_cfn\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n          env:\n            - name: REQUESTS_CA_BUNDLE\n             
 value: \"/etc/heat/certs/ca.crt\"\n{{- end }}\n          command:\n            - /tmp/heat-cfn.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/heat-cfn.sh\n                  - stop\n          ports:\n            - name: h-cfn\n              containerPort: {{ tuple \"cloudformation\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            httpGet:\n              scheme: {{ tuple \"cloudformation\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n              path: /\n              port: {{ tuple \"cloudformation\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          livenessProbe:\n            httpGet:\n              scheme: {{ tuple \"cloudformation\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n              path: /\n              {{- if .Values.pod.probes.cfn.heat_cfn.liveness.port }}\n              port: {{ .Values.pod.probes.cfn.heat_cfn.liveness.port }}\n              {{- else }}\n              port: {{ tuple \"cloudformation\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              {{- end }}\n            initialDelaySeconds: 10\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.heat.oslo_concurrency.lock_path }}\n            - name: pod-etc-heat\n              mountPath: /etc/heat\n            - name: heat-bin\n              mountPath: /tmp/heat-cfn.sh\n              subPath: heat-cfn.sh\n              readOnly: true\n            - name: heat-etc\n              mountPath: /etc/heat/heat-api-cfn-uwsgi.ini\n              subPath: heat-api-cfn-uwsgi.ini\n              readOnly: true\n            - name: heat-etc\n              mountPath: /etc/heat/heat.conf\n              subPath: heat.conf\n              readOnly: true\n            - name: heat-etc-snippets\n              mountPath: /etc/heat/heat.conf.d/\n              readOnly: true\n            {{ if .Values.conf.heat.DEFAULT.log_config_append }}\n            - name: heat-etc\n              mountPath: {{ .Values.conf.heat.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.heat.DEFAULT.log_config_append }}\n              readOnly: true\n            {{ end }}\n            - name: heat-etc\n              mountPath: /etc/heat/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: heat-etc\n              mountPath: /etc/heat/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: heat-etc\n              mountPath: /etc/heat/api_audit_map.conf\n              subPath: api_audit_map.conf\n              readOnly: true\n            {{- if .Values.manifests.certificates }}\n            - name: heat-etc\n              mountPath: {{ .Values.conf.software.apache2.site_dir }}/heat-api-cfn.conf\n              subPath: wsgi-cnf.conf\n              readOnly: true\n            - name: heat-etc\n              
mountPath: {{ .Values.conf.software.apache2.mods_dir }}/mpm_event.conf\n              subPath: mpm_event.conf\n              readOnly: true\n            {{- end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.cloudformation.cfn.internal \"path\" \"/etc/heat/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_heat_cfn.volumeMounts }}{{ toYaml $mounts_heat_cfn.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-heat\n          emptyDir: {}\n        - name: heat-bin\n          configMap:\n            name: heat-bin\n            defaultMode: 0555\n        - name: heat-etc\n          secret:\n            secretName: heat-etc\n            defaultMode: 0444\n        - name: heat-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.cloudformation.cfn.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_heat_cfn.volumes }}{{ toYaml $mounts_heat_cfn.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/deployment-engine.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if or ( .Values.manifests.deployment_engine ) ( .Values.manifests.statefulset_engine ) }}\n{{- $envAll := . }}\n\n{{- $mounts_heat_engine := .Values.pod.mounts.heat_engine.heat_engine }}\n{{- $mounts_heat_engine_init := .Values.pod.mounts.heat_engine.init_container }}\n\n{{- $serviceAccountName := \"heat-engine\" }}\n{{ tuple $envAll \"engine\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.heat_engine }}\n---\napiVersion: apps/v1\nmetadata:\n  name: heat-engine\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"heat\" \"engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- if .Values.manifests.deployment_engine }}\nkind: Deployment\nspec:\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n{{- else if .Values.manifests.statefulset_engine }}\nkind: StatefulSet\nspec:\n  serviceName: heat-engine\n{{- end }}\n  replicas: {{ .Values.pod.replicas.engine }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"heat\" \"engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"heat\" \"engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n{{- if 
.Values.manifests.deployment_engine }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"heat_engine\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"heat-engine\" \"containerNames\" (list \"heat-engine\" \"init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n{{- end }}\n    spec:\n{{ tuple \"heat_engine\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"heat_engine\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"heat\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{- tuple $envAll \"heat\" \"engine\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.engine.node_selector_key }}: {{ .Values.labels.engine.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.heat.enabled }}\n{{ tuple $envAll \"heat\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.engine.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"engine\" $mounts_heat_engine_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: heat-engine\n{{ tuple $envAll \"heat_engine\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.engine | include 
\"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"heat\" \"container\" \"heat_engine\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n          env:\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/heat/certs/ca.crt\"\n{{- end }}\n          command:\n            - /tmp/heat-engine.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/heat-engine.sh\n                  - stop\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.heat.oslo_concurrency.lock_path }}\n            - name: pod-etc-heat\n              mountPath: /etc/heat\n            - name: heat-bin\n              mountPath: /tmp/heat-engine.sh\n              subPath: heat-engine.sh\n              readOnly: true\n            - name: heat-etc\n              mountPath: /etc/heat/heat.conf\n              subPath: heat.conf\n              readOnly: true\n            - name: heat-etc-snippets\n              mountPath: /etc/heat/heat.conf.d/\n              readOnly: true\n            {{ if .Values.conf.heat.DEFAULT.log_config_append }}\n            - name: heat-etc\n              mountPath: {{ .Values.conf.heat.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.heat.DEFAULT.log_config_append }}\n              readOnly: true\n            {{ end }}\n            - name: heat-etc\n              mountPath: /etc/heat/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 
}}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.orchestration.api.internal \"path\" \"/etc/heat/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_heat_engine.volumeMounts }}{{ toYaml $mounts_heat_engine.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-heat\n          emptyDir: {}\n        - name: heat-bin\n          configMap:\n            name: heat-bin\n            defaultMode: 0555\n        - name: heat-etc\n          secret:\n            secretName: heat-etc\n            defaultMode: 0444\n        - name: heat-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.orchestration.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_heat_engine.volumes }}{{ toYaml $mounts_heat_engine.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "heat/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendServiceType\" \"orchestration\" \"backendPort\" \"h-api\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.orchestration.api.internal -}}\n{{- if and .Values.manifests.certificates $secretName -}}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.orchestration.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/ingress-cfn.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_cfn .Values.network.cfn.ingress.public }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"cfn\" \"backendServiceType\" \"cloudformation\" \"backendPort\" \"h-cfn\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.cloudformation.cfn.internal -}}\n{{- if and .Values.manifests.certificates $secretName -}}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.cloudformation.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.bootstrap\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"heat\" \"keystoneUser\" .Values.bootstrap.ks_user \"logConfigFile\" .Values.conf.heat.DEFAULT.log_config_append -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $bootstrapJob \"tlsSecret\" .Values.secrets.tls.orchestration.api.internal -}}\n{{- end -}}\n{{- $_ := set $bootstrapJob \"jobAnnotations\" (include \"metadata.annotations.job.bootstrap\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.heat.enabled -}}\n{{- $_ := set $bootstrapJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"heat\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbDropJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.heat.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"heat\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.heat.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"heat\" \"podVolMounts\" .Values.pod.mounts.heat_db_sync.heat_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.heat_db_sync.heat_db_sync.volumes -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbSyncJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.heat.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"heat\" -}}\n{{- $_ := set $imageRepoSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.heat.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"heat\" \"serviceTypes\" ( tuple \"orchestration\" \"cloudformation\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.orchestration.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.heat.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"heat\" \"serviceTypes\" ( tuple \"orchestration\" \"cloudformation\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.orchestration.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.heat.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/job-ks-user-domain.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_user_domain }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"heat-ks-user-domain\" }}\n{{ tuple $envAll \"ks_user\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: heat-domain-ks-user\n  labels:\n{{ tuple $envAll \"heat\" \"ks-user-domain\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": post-install,post-upgrade\n    \"helm.sh/hook-delete-policy\": before-hook-creation\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"heat\" \"ks-user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"heat-domain-ks-user\" \"containerNames\" (list \"heat-ks-domain-user\" \"init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"ks_user\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.heat.enabled }}\n{{ tuple $envAll \"heat\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"ks_user\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: heat-ks-domain-user\n{{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"ks_user\" \"container\" \"heat_ks_domain_user\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/ks-domain-user.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ks-user-sh\n              mountPath: /tmp/ks-domain-user.sh\n              subPath: ks-domain-user.sh\n              readOnly: true\n{{ dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.orchestration.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 12 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.admin \"useCA\" (or .Values.manifests.certificates .Values.tls.identity) }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 
}}\n{{- end }}\n            - name: SERVICE_OS_SERVICE_NAME\n              value: \"heat\"\n            - name: SERVICE_OS_REGION_NAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.identity.heat_stack_user }}\n                  key: OS_REGION_NAME\n            - name: SERVICE_OS_DOMAIN_NAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.identity.heat_stack_user }}\n                  key: OS_DOMAIN_NAME\n            - name: SERVICE_OS_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.identity.heat_stack_user }}\n                  key: OS_USERNAME\n            - name: SERVICE_OS_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.identity.heat_stack_user }}\n                  key: OS_PASSWORD\n            - name: SERVICE_OS_ROLE\n              value: {{ .Values.endpoints.identity.auth.heat_stack_user.role | quote }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: ks-user-sh\n          configMap:\n            name: heat-bin\n            defaultMode: 0555\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.orchestration.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"heat\" \"serviceUsers\" (tuple \"heat\" \"heat_trustee\") -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.orchestration.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.heat.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"heat\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $rmqUserJob \"tlsSecret\" .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $rmqUserJob \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.heat.enabled -}}\n{{- $_ := set $rmqUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/job-trusts.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_trusts }}\n{{- $envAll := . }}\n\n{{- $mounts_heat_trusts := .Values.pod.mounts.heat_trusts.heat_trusts }}\n{{- $mounts_heat_trusts_init := .Values.pod.mounts.heat_trusts.init_container }}\n\n{{- $serviceAccountName := \"heat-trusts\" }}\n{{ tuple $envAll \"trusts\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: heat-trusts\n  labels:\n{{ tuple $envAll \"heat\" \"trusts\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": post-install,post-upgrade\n    \"helm.sh/hook-delete-policy\": before-hook-creation\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"heat\" \"trusts\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"heat-trusts\" \"containerNames\" (list \"heat-trusts\" \"init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"heat_trusts\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"heat_trusts\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"trusts\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.heat.enabled }}\n{{ tuple $envAll \"heat\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"trusts\" $mounts_heat_trusts_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: heat-trusts\n{{ tuple $envAll \"ks_service\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.trusts | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"trusts\" \"container\" \"heat_trusts\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - bash\n            - /tmp/trusts.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: heat-bin\n              mountPath: /tmp/trusts.sh\n              subPath: trusts.sh\n              readOnly: true\n{{ dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.orchestration.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 12 }}\n{{ if $mounts_heat_trusts.volumeMounts }}{{ toYaml $mounts_heat_trusts.volumeMounts | indent 12 }}{{ end }}\n          env:\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.admin \"useCA\" (or .Values.manifests.certificates .Values.tls.identity) }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n            - name: SERVICE_OS_ROLES\n              value: {{ .Values.conf.heat.DEFAULT.trusts_delegated_roles }}\n            - name: SERVICE_OS_TRUSTEE\n              value: {{ .Values.endpoints.identity.auth.heat_trustee.username }}\n            - name: SERVICE_OS_TRUSTEE_DOMAIN\n              value: {{ .Values.endpoints.identity.auth.heat_trustee.user_domain_name }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: heat-bin\n          configMap:\n            name: heat-bin\n            defaultMode: 0555\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.orchestration.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_heat_trusts.volumes }}{{ toYaml $mounts_heat_trusts.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/network_policy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"heat\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "heat/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: heat-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"heat\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/pdb-cfn.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_cfn }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: heat-cfn\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.cfn.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"heat\" \"cfn\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/pod-rally-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.pod_rally_test }}\n{{- $envAll := . }}\n\n{{- $mounts_tests := .Values.pod.mounts.heat_tests.heat_tests }}\n{{- $mounts_tests_init := .Values.pod.mounts.heat_tests.init_container }}\n\n{{- $serviceAccountName := print $envAll.deployment_name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ print $envAll.deployment_name \"-test\" }}\n  labels:\n{{ tuple $envAll \"heat\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.heat.enabled }}\n{{ tuple $envAll \"heat\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  restartPolicy: Never\n{{ tuple \"heat_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 2 }}\n{{ tuple \"heat_tests\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n    - name: {{ .deployment_name }}-test-ks-user\n{{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      command:\n        - /tmp/ks-user.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: heat-bin\n          mountPath: /tmp/ks-user.sh\n          subPath: ks-user.sh\n          readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.orchestration.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 8 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_SERVICE_NAME\n          value: \"test\"\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_ROLE\n          value: {{ .Values.endpoints.identity.auth.test.role | quote }}\n  containers:\n    - name: {{ .deployment_name }}-test\n{{ tuple $envAll \"test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" 
$env | indent 8 }}\n{{- end }}\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: RALLY_ENV_NAME\n          value: {{.deployment_name}}\n      command:\n        - /tmp/rally-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: heat-etc\n          mountPath: /etc/rally/rally_tests.yaml\n          subPath: rally_tests.yaml\n          readOnly: true\n        - name: heat-bin\n          mountPath: /tmp/rally-test.sh\n          subPath: rally-test.sh\n          readOnly: true\n        - name: rally-db\n          mountPath: /var/lib/rally\n        {{- range $key, $value := $envAll.Values.conf.rally_tests.templates }}\n        - name: heat-etc\n          mountPath: {{ $value.name }}\n          subPath: {{ printf \"test_template_%d\" $key }}\n          readOnly: true\n        {{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.orchestration.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 8 }}\n{{ if $mounts_tests.volumeMounts }}{{ toYaml $mounts_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: heat-etc\n      secret:\n        secretName: heat-etc\n        defaultMode: 0444\n    - name: heat-bin\n      configMap:\n        name: heat-bin\n        defaultMode: 0555\n    - name: rally-db\n      emptyDir: {}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.orchestration.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{ if $mounts_tests.volumes }}{{ toYaml $mounts_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"heat\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{ include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"orchestration\" ) }}\n{{ include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendService\" \"cfn\" \"backendServiceType\" \"cloudformation\" ) }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"heat\" \"heat_trustee\" \"test\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $envAll.Values.secrets.identity.heat_stack_user }}\n  annotations:\n{{ tuple \"identity\" \"heat_stack_user\" $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  OS_AUTH_URL: {{ tuple \"identity\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | b64enc }}\n  OS_REGION_NAME: {{ .Values.endpoints.identity.auth.heat_stack_user.region_name | b64enc }}\n  OS_DOMAIN_NAME: {{ .Values.endpoints.identity.auth.heat_stack_user.domain_name | b64enc }}\n  OS_USERNAME: {{ .Values.endpoints.identity.auth.heat_stack_user.username | b64enc }}\n  OS_PASSWORD: {{ .Values.endpoints.identity.auth.heat_stack_user.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- $rabbitmqProtocol := \"http\" }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- $rabbitmqProtocol = \"https\" }}\n{{- end }}\n{{- range $key1, $userClass := tuple \"admin\" \"heat\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass $rabbitmqProtocol $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"orchestration\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: h-api\n      port: {{ tuple \"orchestration\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"heat\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/service-cfn.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_cfn }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"cloudformation\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: h-cfn\n      port: {{ tuple \"cloudformation\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.cfn.node_port.enabled }}\n      nodePort: {{ .Values.network.cfn.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"heat\" \"cfn\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.cfn.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"orchestration\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "heat/templates/service-ingress-cfn.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_cfn .Values.network.cfn.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"cloudformation\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "heat/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for heat.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  cfn:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  engine:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    heat_db_sync: quay.io/airshipit/heat:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    heat_api: quay.io/airshipit/heat:2025.1-ubuntu_noble\n    heat_cfn: quay.io/airshipit/heat:2025.1-ubuntu_noble\n    heat_engine: quay.io/airshipit/heat:2025.1-ubuntu_noble\n    
heat_engine_cleaner: quay.io/airshipit/heat:2025.1-ubuntu_noble\n    heat_purge_deleted: quay.io/airshipit/heat:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\njobs:\n  engine_cleaner:\n    cron: \"*/5 * * * *\"\n    starting_deadline: 600\n    history:\n      success: 3\n      failed: 1\n\n  purge_deleted:\n    cron: \"20 */24 * * *\"\n    purge_age: 60\n    history:\n      success: 3\n      failed: 1\n\nconf:\n  rally_tests:\n    run_tempest: false\n    tests:\n      HeatStacks.create_update_delete_stack:\n        - args:\n            template_path: /tmp/rally-jobs/random_strings.yaml\n            updated_template_path: /tmp/rally-jobs/updated_random_strings_replace.yaml\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      HeatStacks.create_check_delete_stack:\n        - args:\n            template_path: /tmp/rally-jobs/random_strings.yaml\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      HeatStacks.create_and_delete_stack:\n        - args:\n            template_path: /tmp/rally-jobs/resource_group_with_constraint.yaml\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      HeatStacks.create_and_list_stack:\n        - args:\n            template_path: /tmp/rally-jobs/default.yaml\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      HeatStacks.create_snapshot_restore_delete_stack:\n     
   - args:\n            template_path: /tmp/rally-jobs/random_strings.yaml\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      HeatStacks.create_stack_and_list_output:\n        - args:\n            template_path: /tmp/rally-jobs/resource_group_with_outputs.yaml\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      HeatStacks.create_stack_and_list_output_via_API:\n        - args:\n            template_path: /tmp/rally-jobs/resource_group_with_outputs.yaml\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n    templates:\n      - name: /tmp/rally-jobs/default.yaml\n        template: |\n          heat_template_version: 2014-10-16\n      - name: /tmp/rally-jobs/random_strings.yaml\n        template: |\n          heat_template_version: 2014-10-16\n          description: Test template for rally create-update-delete scenario\n          resources:\n            test_string_one:\n              type: OS::Heat::RandomString\n              properties:\n                length: 20\n            test_string_two:\n              type: OS::Heat::RandomString\n              properties:\n                length: 20\n      - name: /tmp/rally-jobs/resource_group_with_constraint.yaml\n        template: |\n          heat_template_version: 2013-05-23\n          description: Template for testing caching.\n          parameters:\n            count:\n              type: number\n              default: 40\n            delay:\n              type: number\n              default: 0.1\n          resources:\n            rg:\n              type: OS::Heat::ResourceGroup\n              properties:\n                count:\n                  get_param: count\n             
   resource_def:\n                    type: OS::Heat::TestResource\n                    properties:\n                      constraint_prop_secs:\n                        get_param: delay\n      - name: /tmp/rally-jobs/resource_group_with_outputs.yaml\n        template: |\n          heat_template_version: 2013-05-23\n          parameters:\n            attr_wait_secs:\n              type: number\n              default: 0.5\n          resources:\n            rg:\n              type: OS::Heat::ResourceGroup\n              properties:\n                count: 10\n                resource_def:\n                  type: OS::Heat::TestResource\n                  properties:\n                    attr_wait_secs:\n                      get_param: attr_wait_secs\n          outputs:\n            val1:\n              value:\n                get_attr:\n                  - rg\n                  - resource.0.output\n            val2:\n              value:\n                get_attr:\n                  - rg\n                  - resource.1.output\n            val3:\n              value:\n                get_attr:\n                  - rg\n                  - resource.2.output\n            val4:\n              value:\n                get_attr:\n                  - rg\n                  - resource.3.output\n            val5:\n              value:\n                get_attr:\n                  - rg\n                  - resource.4.output\n            val6:\n              value:\n                get_attr:\n                  - rg\n                  - resource.5.output\n            val7:\n              value:\n                get_attr:\n                  - rg\n                  - resource.6.output\n            val8:\n              value:\n                get_attr:\n                  - rg\n                  - resource.7.output\n            val9:\n              value:\n                get_attr:\n                  - rg\n                  - resource.8.output\n            val10:\n              
value:\n                get_attr:\n                  - rg\n                  - resource.9.output\n      - name: /tmp/rally-jobs/updated_random_strings_replace.yaml\n        template: |\n          heat_template_version: 2014-10-16\n          description: |\n            Test template for create-update-delete-stack scenario in rally.\n            The template deletes one resource from the stack defined by\n            random-strings.yaml.template and re-creates it with the updated parameters\n            (so-called update-replace). That happens because some parameters cannot be\n            changed without resource re-creation. The template allows to measure performance\n            of update-replace operation.\n          resources:\n            test_string_one:\n              type: OS::Heat::RandomString\n              properties:\n                length: 20\n            test_string_two:\n              type: OS::Heat::RandomString\n              properties:\n                length: 40\n  paste:\n    pipeline:heat-api:\n      pipeline: cors request_id faultwrap http_proxy_to_wsgi versionnegotiation osprofiler authurl authtoken audit context apiv1app\n    pipeline:heat-api-standalone:\n      pipeline: cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authurl authpassword context apiv1app\n    pipeline:heat-api-custombackend:\n      pipeline: cors request_id faultwrap versionnegotiation context custombackendauth apiv1app\n    pipeline:heat-api-cfn:\n      pipeline: cors http_proxy_to_wsgi cfnversionnegotiation osprofiler ec2authtoken authtoken audit context apicfnv1app\n    pipeline:heat-api-cfn-standalone:\n      pipeline: cors http_proxy_to_wsgi cfnversionnegotiation ec2authtoken context apicfnv1app\n    app:apiv1app:\n      paste.app_factory: heat.common.wsgi:app_factory\n      heat.app_factory: heat.api.openstack.v1:API\n    app:apicfnv1app:\n      paste.app_factory: heat.common.wsgi:app_factory\n      heat.app_factory: heat.api.cfn.v1:API\n    
filter:versionnegotiation:\n      paste.filter_factory: heat.common.wsgi:filter_factory\n      heat.filter_factory: heat.api.openstack:version_negotiation_filter\n    filter:cors:\n      paste.filter_factory: oslo_middleware.cors:filter_factory\n      oslo_config_project: heat\n    filter:faultwrap:\n      paste.filter_factory: heat.common.wsgi:filter_factory\n      heat.filter_factory: heat.api.openstack:faultwrap_filter\n    filter:cfnversionnegotiation:\n      paste.filter_factory: heat.common.wsgi:filter_factory\n      heat.filter_factory: heat.api.cfn:version_negotiation_filter\n    filter:context:\n      paste.filter_factory: heat.common.context:ContextMiddleware_filter_factory\n    filter:ec2authtoken:\n      paste.filter_factory: heat.api.aws.ec2token:EC2Token_filter_factory\n    filter:http_proxy_to_wsgi:\n      paste.filter_factory: oslo_middleware:HTTPProxyToWSGI.factory\n    filter:authurl:\n      paste.filter_factory: heat.common.auth_url:filter_factory\n    filter:authtoken:\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n    filter:authpassword:\n      paste.filter_factory: heat.common.auth_password:filter_factory\n    filter:custombackendauth:\n      paste.filter_factory: heat.common.custom_backend_auth:filter_factory\n    filter:audit:\n      paste.filter_factory: keystonemiddleware.audit:filter_factory\n      audit_map_file: /etc/heat/api_audit_map.conf\n    filter:request_id:\n      paste.filter_factory: oslo_middleware.request_id:RequestId.factory\n    filter:osprofiler:\n      paste.filter_factory: osprofiler.web:WsgiMiddleware.factory\n  policy: {}\n  heat:\n    DEFAULT:\n      log_config_append: /etc/heat/logging.conf\n      num_engine_workers: 1\n      trusts_delegated_roles: \"\"\n      host: heat-engine\n    keystone_authtoken:\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n      service_type: orchestration\n    database:\n      max_retries: -1\n      # -- Database 
connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    trustee:\n      auth_type: password\n      auth_version: v3\n    heat_api:\n      # NOTE(portdirect): the bind port should not be defined, and is manipulated\n      # via the endpoints section.\n      bind_port: null\n      workers: 1\n    heat_api_cfn:\n      # NOTE(portdirect): the bind port should not be defined, and is manipulated\n      # via the endpoints section.\n      bind_port: null\n      workers: 1\n    paste_deploy:\n      api_paste_config: /etc/heat/api-paste.ini\n    clients:\n      endpoint_type: internalURL\n    clients_heat:\n      endpoint_type: publicURL\n    clients_keystone:\n      endpoint_type: internalURL\n    oslo_messaging_notifications:\n      driver: messagingv2\n    oslo_middleware:\n      enable_proxy_headers_parsing: true\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: True\n    oslo_policy:\n      policy_file: /etc/heat/policy.yaml\n    oslo_concurrency:\n      lock_path: /var/lock\n  api_audit_map:\n    DEFAULT:\n      target_endpoint_type: None\n    path_keywords:\n      stacks: stack\n      resources: resource\n      preview: None\n      detail: None\n      abandon: None\n      snapshots: snapshot\n      restore: None\n      outputs: output\n      metadata: server\n      signal: None\n      events: event\n      template: None\n      template_versions: template_version\n      functions: None\n      validate: None\n      resource_types: resource_type\n      build_info: None\n      actions: None\n      software_configs: software_config\n      software_deployments: software_deployment\n      services: None\n    service_endpoints:\n      orchestration:service/orchestration\n  logging:\n    loggers:\n      keys:\n        - root\n      
  - heat\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_heat:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: heat\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n\n  rabbitmq:\n    # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones\n    policies:\n      - vhost: \"heat\"\n        name: \"ha_ttl_heat\"\n        definition:\n          # mirror messages to other nodes in rmq cluster\n          ha-mode: \"all\"\n          ha-sync-mode: \"automatic\"\n          # 70s\n          message-ttl: 70000\n        priority: 0\n        apply-to: all\n        pattern: '^(?!(amq\\.|reply_)).*'\n  heat_api_uwsgi:\n    uwsgi:\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      hook-master-start: 
unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      procname-prefix-spaced: \"heat-api:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      wsgi-file: /var/lib/openstack/bin/heat-wsgi-api\n      stats: 0.0.0.0:1717\n      stats-http: true\n  heat_api_cfn_uwsgi:\n    uwsgi:\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      procname-prefix-spaced: \"heat-api-cfn:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      wsgi-file: /var/lib/openstack/bin/heat-wsgi-api-cfn\n      stats: 0.0.0.0:1717\n      stats-http: true\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30004\n  cfn:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    node_port:\n      enabled: false\n      port: 30800\n\nbootstrap:\n  enabled: true\n  ks_user: admin\n  script: |\n    #NOTE(portdirect): The Orchestration service automatically assigns the\n    # 'heat_stack_user' role to users that it creates during stack deployment.\n    # By default, this role restricts API operations. 
To avoid conflicts, do\n    # not add this role to actual users.\n    openstack role create --or-show heat_stack_user\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - heat-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - heat-db-sync\n        - heat-rabbit-init\n        - heat-ks-user\n        - heat-domain-ks-user\n        - heat-ks-endpoints\n        - heat-bootstrap\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: identity\n    cfn:\n      jobs:\n        - heat-db-sync\n        - heat-rabbit-init\n        - heat-ks-user\n        - heat-domain-ks-user\n        - heat-ks-endpoints\n        - heat-bootstrap\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: identity\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - heat-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    bootstrap:\n      services:\n        - endpoint: internal\n          service: identity\n    engine:\n      jobs:\n        - heat-db-sync\n        - heat-rabbit-init\n        - heat-ks-user\n        - heat-domain-ks-user\n        - heat-ks-endpoints\n        - heat-bootstrap\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: identity\n    engine_cleaner:\n      jobs:\n        - heat-db-sync\n        - heat-ks-user\n        - 
heat-domain-ks-user\n        - heat-ks-endpoints\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: identity\n    purge_deleted:\n      jobs:\n        - heat-db-sync\n        - heat-ks-user\n        - heat-domain-ks-user\n        - heat-ks-endpoints\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: identity\n    ks_endpoints:\n      jobs:\n        - heat-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n    trusts:\n      jobs:\n        - heat-ks-user\n        - heat-domain-ks-user\n      services:\n        - endpoint: internal\n          service: identity\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    tests:\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: orchestration\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: heat-keystone-admin\n    heat: heat-keystone-user\n    heat_trustee: heat-keystone-trustee\n    heat_stack_user: heat-keystone-stack-user\n    test: heat-keystone-test\n  oslo_db:\n    admin: heat-db-admin\n    heat: heat-db-user\n  oslo_messaging:\n    admin: heat-rabbitmq-admin\n    heat: heat-rabbitmq-user\n  tls:\n    orchestration:\n      api:\n        public: heat-tls-public\n        internal: heat-tls-api\n    cloudformation:\n      cfn:\n        public: 
cloudformation-tls-public\n        internal: heat-tls-cfn\n  oci_image_registry:\n    heat: heat-oci-image-registry\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      heat:\n        username: heat\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      heat:\n        role: admin\n        region_name: RegionOne\n        username: heat\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      heat_trustee:\n        role: admin\n        region_name: RegionOne\n        username: heat-trust\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      heat_stack_user:\n        role: admin\n        region_name: RegionOne\n        username: heat-domain\n        password: password\n        domain_name: heat\n      test:\n        role: admin\n        region_name: RegionOne\n        username: heat-test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: 
keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 80\n        internal: 5000\n  orchestration:\n    name: heat\n    hosts:\n      default: heat-api\n      public: heat\n    host_fqdn_override:\n      default: null\n      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: '/v1/%(project_id)s'\n    scheme:\n      default: 'http'\n      service: 'http'\n    port:\n      api:\n        default: 8004\n        public: 80\n        service: 8004\n  cloudformation:\n    name: heat-cfn\n    hosts:\n      default: heat-cfn\n      public: cloudformation\n    host_fqdn_override:\n      default: null\n      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: /v1\n    scheme:\n      default: 'http'\n      service: 'http'\n    port:\n      api:\n        default: 8000\n        public: 80\n        service: 8000\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      heat:\n        username: heat\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /heat\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this 
feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n        secret:\n          tls:\n            internal: rabbitmq-tls-direct\n      heat:\n        username: heat\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /heat\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress\n  # They are using to enable the Egress K8s network policy.\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns:\n        default: 53\n        protocol: UDP\n  ingress:\n    namespace: null\n    name: ingress\n    hosts:\n      default: ingress\n    port:\n      ingress:\n        default: 80\n\npod:\n  security_context:\n    heat:\n      pod:\n        runAsUser: 42424\n      container:\n        heat_api:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        heat_cfn:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        heat_engine:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: 
false\n    trusts:\n      pod:\n        runAsUser: 42424\n      container:\n        heat_trusts:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    ks_user:\n      pod:\n        runAsUser: 42424\n      container:\n        heat_ks_domain_user:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    engine_cleaner:\n      pod:\n        runAsUser: 42424\n      container:\n        heat_engine_cleaner:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    heat:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  mounts:\n    heat_api:\n      init_container: null\n      heat_api:\n        volumeMounts:\n        volumes:\n    heat_cfn:\n      init_container: null\n      heat_cfn:\n        volumeMounts:\n        volumes:\n    heat_engine:\n      init_container: null\n      heat_engine:\n        volumeMounts:\n        volumes:\n    heat_bootstrap:\n      init_container: null\n      heat_bootstrap:\n        volumeMounts:\n        volumes:\n    heat_trusts:\n      init_container: null\n      heat_trusts:\n        volumeMounts:\n        volumes:\n    heat_engine_cleaner:\n      init_container: null\n      heat_engine_cleaner:\n        volumeMounts:\n        volumes:\n    heat_purge_deleted:\n      init_container: null\n      heat_purge_deleted:\n        volumeMounts:\n        volumes:\n    heat_tests:\n      init_container: null\n      heat_tests:\n        volumeMounts:\n        volumes:\n    heat_db_sync:\n      heat_db_sync:\n        volumeMounts:\n        volumes:\n  
# -- This allows users to add Kubernetes Projected Volumes to be mounted at /etc/heat/heat.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/daemonset/cronjob\n  ## https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    heat_api: []\n    heat_cfn: []\n    heat_engine: []\n    heat_engine_cleaner: []\n    heat_purge_deleted: []\n    heat_db_sync: []\n  replicas:\n    api: 1\n    cfn: 1\n    engine: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n      cfn:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n      cfn:\n        timeout: 30\n      engine:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    cfn:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    engine:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n         
 memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      trusts:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      engine_cleaner:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      purge_deleted:\n        requests:\n          memory: \"124Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  probes:\n    api:\n      heat_api:\n        liveness:\n          port: 1717\n    cfn:\n      heat_cfn:\n        liveness:\n          port: 1717\n\nnetwork_policy:\n  heat:\n    ingress:\n      - {}\n    egress:\n      - {}\n\ntls:\n  identity: false\n  oslo_messaging: false\n  oslo_db: false\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  cron_job_engine_cleaner: true\n  cron_job_purge_deleted: true\n  deployment_api: 
true\n  deployment_cfn: true\n  deployment_engine: true\n  ingress_api: true\n  ingress_cfn: true\n  job_bootstrap: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_image_repo_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user_domain: true\n  job_ks_user_trustee: true\n  job_ks_user: true\n  job_rabbit_init: true\n  pdb_api: true\n  pdb_cfn: true\n  pod_rally_test: true\n  network_policy: false\n  secret_db: true\n  secret_ingress_tls: true\n  secret_keystone: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_api: true\n  service_cfn: true\n  service_ingress_api: true\n  service_ingress_cfn: true\n  statefulset_engine: false\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "helm-toolkit/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Helm-Toolkit\nname: helm-toolkit\nversion: 2025.2.0\nhome: https://docs.openstack.org/openstack-helm\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/OpenStack-Helm/OpenStack_Project_OpenStackHelm_vertical.png\nsources:\n  - https://opendev.org/openstack/openstack-helm\n  - https://opendev.org/openstack/openstack-helm-infra\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies: []\n...\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_authenticated_endpoint_uri_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Resolves database, or basic auth, style endpoints\nvalues: |\n  endpoints:\n    cluster_domain_suffix: cluster.local\n    oslo_db:\n      auth:\n        admin:\n          username: root\n          password: password\n        service_username:\n          username: username\n          password: password\n      hosts:\n        default: mariadb\n      host_fqdn_override:\n        default: null\n      path: /dbname\n      scheme: mysql+pymysql\n      port:\n        mysql:\n          default: 3306\nusage: |\n  {{ tuple \"oslo_db\" \"internal\" \"service_username\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\nreturn: |\n  mysql+pymysql://serviceuser:password@mariadb.default.svc.cluster.local:3306/dbname\n*/}}\n\n{{- define \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- $type := index . 0 -}}\n{{- $endpoint := index . 1 -}}\n{{- $userclass := index . 2 -}}\n{{- $port := index . 3 -}}\n{{- $context := index . 
4 -}}\n{{- $endpointScheme := tuple $type $endpoint $port $context | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\n{{- $userMap := index $context.Values.endpoints ( $type | replace \"-\" \"_\" ) \"auth\" $userclass }}\n{{- $endpointUser := index $userMap \"username\" }}\n{{- $endpointPass := index $userMap \"password\" | urlquery }}\n{{- $endpointHost := tuple $type $endpoint $context | include \"helm-toolkit.endpoints.endpoint_host_lookup\" }}\n{{- $endpointPort := tuple $type $endpoint $port $context | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $endpointPath := tuple $type $endpoint $port $context | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" }}\n{{- printf \"%s://%s:%s@%s:%s%s\" $endpointScheme $endpointUser $endpointPass $endpointHost $endpointPort $endpointPath -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_authenticated_transport_endpoint_uri_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Resolves endpoint string suitable for use with oslo.messaging transport url\n  See: https://docs.openstack.org/oslo.messaging/latest/reference/transport.html#oslo_messaging.TransportURL\nexamples:\n  - values: |\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        oslo_messaging:\n          auth:\n            cinder:\n              username: cinder\n              password: password\n          statefulset:\n            replicas: 2\n            name: rabbitmq-rabbitmq\n          hosts:\n            default: rabbitmq\n          host_fqdn_override:\n            default: null\n          path: /cinder\n          scheme: rabbit\n          port:\n            amqp:\n              default: 5672\n    usage: |\n      {{ tuple \"oslo_messaging\" \"internal\" \"cinder\" \"amqp\" . 
| include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" }}\n    return: |\n      rabbit://cinder:password@rabbitmq-rabbitmq-0.rabbitmq.default.svc.cluster.local:5672,cinder:password@rabbitmq-rabbitmq-1.rabbitmq.default.svc.cluster.local:5672/cinder\n  - values: |\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        oslo_messaging:\n          auth:\n            cinder:\n              username: cinder\n              password: password\n          statefulset: null\n          hosts:\n            default: rabbitmq\n          host_fqdn_override:\n            default: null\n          path: /cinder\n          scheme: rabbit\n          port:\n            amqp:\n              default: 5672\n    usage: |\n      {{ tuple \"oslo_messaging\" \"internal\" \"cinder\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" }}\n    return: |\n      rabbit://cinder:password@rabbitmq.default.svc.cluster.local:5672/cinder\n  - values: |\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        oslo_messaging:\n          auth:\n            cinder:\n              username: cinder\n              password: password\n          statefulset:\n            replicas: 2\n            name: rabbitmq-rabbitmq\n          hosts:\n            default: rabbitmq\n          host_fqdn_override:\n            default: rabbitmq.openstackhelm.org\n          path: /cinder\n          scheme: rabbit\n          port:\n            amqp:\n              default: 5672\n    usage: |\n      {{ tuple \"oslo_messaging\" \"internal\" \"cinder\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" }}\n    return: |\n      rabbit://cinder:password@rabbitmq.openstackhelm.org:5672/cinder\n*/}}\n\n{{- define \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" -}}\n{{-   $type := index . 0 -}}\n{{-   $endpoint := index . 1 -}}\n{{-   $userclass := index . 
2 -}}\n{{-   $port := index . 3 -}}\n{{-   $context := index . 4 -}}\n{{-   $endpointScheme := tuple $type $endpoint $port $context | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\n{{-   $userMap := index $context.Values.endpoints ( $type | replace \"-\" \"_\" ) \"auth\" $userclass }}\n{{-   $ssMap := index $context.Values.endpoints ( $type | replace \"-\" \"_\" ) \"statefulset\" | default false}}\n{{-   $hostFqdnOverride := index $context.Values.endpoints ( $type | replace \"-\" \"_\" ) \"host_fqdn_override\" }}\n{{-   $endpointUser := index $userMap \"username\" }}\n{{-   $endpointPass := index $userMap \"password\" | urlquery }}\n{{-   $endpointHostSuffix := tuple $type $endpoint $context | include \"helm-toolkit.endpoints.endpoint_host_lookup\" }}\n{{-   $endpointPort := tuple $type $endpoint $port $context | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{-   $local := dict \"endpointCredsAndHosts\" list -}}\n{{-   if not (or (index $hostFqdnOverride $endpoint | default ( index $hostFqdnOverride \"default\" ) ) ( not $ssMap ) ) }}\n{{-     $endpointHostPrefix := $ssMap.name }}\n{{-     range $podInt := until ( atoi (print $ssMap.replicas ) ) }}\n{{-       $endpointCredAndHost := printf \"%s:%s@%s-%d.%s:%s\" $endpointUser $endpointPass $endpointHostPrefix $podInt $endpointHostSuffix $endpointPort }}\n{{-       $_ := set $local \"endpointCredsAndHosts\" ( append $local.endpointCredsAndHosts $endpointCredAndHost ) }}\n{{-     end }}\n{{-   else }}\n{{-     $endpointHost := tuple $type $endpoint $context | include \"helm-toolkit.endpoints.endpoint_host_lookup\" }}\n{{-     $endpointCredAndHost := printf \"%s:%s@%s:%s\" $endpointUser $endpointPass $endpointHost $endpointPort }}\n{{-     $_ := set $local \"endpointCredsAndHosts\" ( append $local.endpointCredsAndHosts $endpointCredAndHost ) }}\n{{-   end }}\n{{-   $endpointCredsAndHosts := include \"helm-toolkit.utils.joinListWithComma\" $local.endpointCredsAndHosts }}\n{{-   
$endpointPath := tuple $type $endpoint $port $context | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" }}\n{{-   printf \"%s://%s%s\" $endpointScheme $endpointCredsAndHosts $endpointPath }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_endpoint_host_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Resolves either the fully qualified hostname, of if defined in the host field\n  IPv4 for an endpoint.\nexamples:\n  - values: |\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        oslo_db:\n          hosts:\n            default: mariadb\n          host_fqdn_override:\n            default: null\n    usage: |\n      {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.endpoint_host_lookup\" }}\n    return: |\n      mariadb.default.svc.cluster.local\n  - values: |\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        oslo_db:\n          hosts:\n            default:\n             host: mariadb\n          host_fqdn_override:\n            default: null\n    usage: |\n      {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.endpoint_host_lookup\" }}\n    return: |\n      mariadb.default.svc.cluster.local\n  - values: |\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        oslo_db:\n          hosts:\n            default: 127.0.0.1\n          host_fqdn_override:\n            default: null\n    usage: |\n      {{ tuple \"oslo_db\" \"internal\" . 
| include \"helm-toolkit.endpoints.endpoint_host_lookup\" }}\n    return: |\n      127.0.0.1\n  - values: |\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        oslo_db:\n          hosts:\n            default:\n             host: 127.0.0.1\n          host_fqdn_override:\n            default: null\n    usage: |\n      {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.endpoint_host_lookup\" }}\n    return: |\n      127.0.0.1\n*/}}\n\n{{- define \"helm-toolkit.endpoints.endpoint_host_lookup\" -}}\n{{- $type := index . 0 -}}\n{{- $endpoint := index . 1 -}}\n{{- $context := index . 2 -}}\n{{- $endpointMap := index $context.Values.endpoints ( $type | replace \"-\" \"_\" ) }}\n{{- $endpointScheme := $endpointMap.scheme }}\n{{- $_ := set $context.Values \"__endpointHost\" ( index $endpointMap.hosts $endpoint | default $endpointMap.hosts.default ) }}\n{{- if kindIs \"map\" $context.Values.__endpointHost }}\n{{- $_ := set $context.Values \"__endpointHost\" ( index $context.Values.__endpointHost \"host\" ) }}\n{{- end }}\n{{- $endpointHost := $context.Values.__endpointHost }}\n{{- if regexMatch \"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\\\\.[0-9]+\" $endpointHost }}\n{{- $endpointHostname := printf \"%s\" $endpointHost }}\n{{- printf \"%s\" $endpointHostname -}}\n{{- else }}\n{{- $endpointHostname := tuple $type $endpoint $context | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n{{- printf \"%s\" $endpointHostname -}}\n{{- end }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_endpoint_port_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Resolves the port for an endpoint\nvalues: |\n  endpoints:\n    cluster_domain_suffix: cluster.local\n    oslo_db:\n      port:\n        mysql:\n          default: 3306\nusage: |\n  {{ tuple \"oslo_db\" \"internal\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\nreturn: |\n  3306\n*/}}\n\n{{- define \"helm-toolkit.endpoints.endpoint_port_lookup\" -}}\n{{- $type := index . 0 -}}\n{{- $endpoint := index . 1 -}}\n{{- $port := index . 2 -}}\n{{- $context := index . 3 -}}\n{{- $typeYamlSafe := $type | replace \"-\" \"_\" }}\n{{- $endpointMap := index $context.Values.endpoints $typeYamlSafe }}\n{{- $endpointPortMAP := index $endpointMap.port $port }}\n{{- $endpointPort := index $endpointPortMAP $endpoint | default ( index $endpointPortMAP \"default\" ) }}\n{{- printf \"%1.f\" $endpointPort -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_endpoint_token_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Gets the token for an endpoint\nvalues: |\n  endpoints:\n    keystone:\n      auth:\n        admin:\n          token: zh78JzXgw6YUKy2e\nusage: |\n  {{ tuple \"keystone\" \"admin\" . | include \"helm-toolkit.endpoints.endpoint_token_lookup\" }}\nreturn: |\n  zh78JzXgw6YUKy2e\n*/}}\n\n{{- define \"helm-toolkit.endpoints.endpoint_token_lookup\" -}}\n{{- $type := index . 0 -}}\n{{- $userName := index . 1 -}}\n{{- $context := index . 2 -}}\n{{- $serviceToken := index $context.Values.endpoints ( $type | replace \"-\" \"_\" ) \"auth\" $userName \"token\" }}\n{{- printf \"%s\" $serviceToken -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_host_and_port_endpoint_uri_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Resolves 'hostname:port' for an endpoint, or several hostname:port pairs for statefulset e.g\n  'hostname1:port1,hostname2:port2,hostname3:port3',\nexamples:\n  - values: |\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        oslo_db:\n          hosts:\n            default: mariadb\n          host_fqdn_override:\n            default: null\n          port:\n            mysql:\n              default: 3306\n    usage: |\n      {{ tuple \"oslo_db\" \"internal\" \"mysql\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n    return: |\n      mariadb.default.svc.cluster.local:3306\n  - values: |\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        oslo_db:\n          hosts:\n            default: 127.0.0.1\n          host_fqdn_override:\n            default: null\n          port:\n            mysql:\n              default: 3306\n    usage: |\n      {{ tuple \"oslo_db\" \"internal\" \"mysql\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n    return: |\n      127.0.0.1:3306\n  - values: |\n      endpoints:\n        oslo_cache:\n          hosts:\n            default: memcached\n          host_fqdn_override:\n            default: null\n          statefulset:\n            name: openstack-memcached-memcached\n            replicas: 3\n          port:\n            memcache:\n              default: 11211\n    usage: |\n      {{ tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n    return: |\n      openstack-memcached-memcached-0:11211,openstack-memcached-memcached-1:11211,openstack-memcached-memcached-2:11211\n*/}}\n\n{{- define \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" -}}\n{{- $type := index . 0 -}}\n{{- $endpoint := index . 1 -}}\n{{- $port := index . 2 -}}\n{{- $context := index . 3 -}}\n{{- $ssMap := index $context.Values.endpoints ( $type | replace \"-\" \"_\" ) \"statefulset\" | default false -}}\n{{- $local := dict \"endpointHosts\" list -}}\n{{- $endpointPort := tuple $type $endpoint $port $context | include \"helm-toolkit.endpoints.endpoint_port_lookup\" -}}\n{{- if $ssMap -}}\n{{-   $endpointHostPrefix := $ssMap.name -}}\n{{-   $endpointHostSuffix := tuple $type $endpoint $context | include \"helm-toolkit.endpoints.endpoint_host_lookup\" }}\n{{-   range $podInt := until ( atoi (print $ssMap.replicas ) ) -}}\n{{-     $endpointHostname := printf \"%s-%d.%s:%s\" $endpointHostPrefix $podInt $endpointHostSuffix $endpointPort -}}\n{{-     $_ := set $local \"endpointHosts\" ( append $local.endpointHosts $endpointHostname ) -}}\n{{-   end -}}\n{{- else -}}\n{{-   $endpointHostname := tuple $type $endpoint $context | include \"helm-toolkit.endpoints.endpoint_host_lookup\" -}}\n{{-   $_ := set $local \"endpointHosts\" ( append $local.endpointHosts (printf \"%s:%s\" $endpointHostname $endpointPort) ) -}}\n{{- end -}}\n{{ include 
\"helm-toolkit.utils.joinListWithComma\" $local.endpointHosts }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_hostname_fqdn_endpoint_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Resolves the fully qualified hostname for an endpoint\nexamples:\n  - values: |\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        oslo_db:\n          hosts:\n            default: mariadb\n          host_fqdn_override:\n            default: null\n    usage: |\n      {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n    return: |\n      mariadb.default.svc.cluster.local\n  - values: |\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        oslo_db:\n          hosts:\n            default: mariadb\n          host_fqdn_override:\n            default: mariadb.openstackhelm.openstack.org\n    usage: |\n      {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n    return: |\n      mariadb.openstackhelm.openstack.org\n  - values: |\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        oslo_db:\n          hosts:\n            default: mariadb\n          host_fqdn_override:\n            default:\n              host: mariadb.openstackhelm.openstack.org\n    usage: |\n      {{ tuple \"oslo_db\" \"internal\" . 
| include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n    return: |\n      mariadb.openstackhelm.openstack.org\n*/}}\n\n{{- define \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" -}}\n{{- $type := index . 0 -}}\n{{- $endpoint := index . 1 -}}\n{{- $context := index . 2 -}}\n{{- $endpointMap := index $context.Values.endpoints ( $type | replace \"-\" \"_\" ) }}\n{{- $endpointHostNamespaced := tuple $type $endpoint $context | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $endpointClusterHostname := printf \"%s.svc.%s\" $endpointHostNamespaced $context.Values.endpoints.cluster_domain_suffix }}\n{{- $_ := set $context.Values \"__FQDNendpointHostDefault\" ( index $endpointMap.host_fqdn_override \"default\" | default \"\" ) }}\n{{- if kindIs \"map\" $context.Values.__FQDNendpointHostDefault }}\n{{- $_ := set $context.Values \"__FQDNendpointHostDefault\" ( index $context.Values.__FQDNendpointHostDefault \"host\" ) }}\n{{- end }}\n{{- if kindIs \"map\" (index $endpointMap.host_fqdn_override $endpoint) }}\n{{- $endpointHostname := index $endpointMap.host_fqdn_override $endpoint \"host\" | default $context.Values.__FQDNendpointHostDefault | default $endpointClusterHostname }}\n{{- printf \"%s\" $endpointHostname -}}\n{{- else }}\n{{- $endpointHostname := index $endpointMap.host_fqdn_override $endpoint | default $context.Values.__FQDNendpointHostDefault | default $endpointClusterHostname }}\n{{- printf \"%s\" $endpointHostname -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_hostname_namespaced_endpoint_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Resolves the namespace scoped hostname for an endpoint\nvalues: |\n  endpoints:\n    oslo_db:\n      hosts:\n        default: mariadb\n      host_fqdn_override:\n        default: null\nusage: |\n  {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\nreturn: |\n  mariadb.default\n*/}}\n\n{{- define \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" -}}\n{{- $type := index . 0 -}}\n{{- $endpoint := index . 1 -}}\n{{- $context := index . 2 -}}\n{{- $endpointMap := index $context.Values.endpoints ( $type | replace \"-\" \"_\" ) }}\n{{- $namespace := $endpointMap.namespace | default $context.Release.Namespace }}\n{{- $endpointHost := tuple $type $endpoint $context | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n{{- $endpointClusterHostname := printf \"%s.%s\" $endpointHost $namespace }}\n{{- printf \"%s\" $endpointClusterHostname -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_hostname_namespaced_endpoint_namespace_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Resolves the namespace in which an endpoint's service resides\nvalues: |\n  endpoints:\n    oslo_db:\n      hosts:\n        default: mariadb\n      host_fqdn_override:\n        default: null\nusage: |\n  {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_namespace_lookup\" }}\nreturn: |\n  default\n*/}}\n\n{{- define \"helm-toolkit.endpoints.hostname_namespaced_endpoint_namespace_lookup\" -}}\n{{- $type := index . 0 -}}\n{{- $endpoint := index . 1 -}}\n{{- $context := index . 2 -}}\n{{- $endpointMap := index $context.Values.endpoints ( $type | replace \"-\" \"_\" ) }}\n{{- $namespace := $endpointMap.namespace | default $context.Release.Namespace }}\n{{- printf \"%s\" $namespace -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_hostname_short_endpoint_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Resolves the short hostname for an endpoint\nexamples:\n  - values: |\n      endpoints:\n        oslo_db:\n          hosts:\n            default: mariadb\n          host_fqdn_override:\n            default: null\n    usage: |\n      {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n    return: |\n      mariadb\n  - values: |\n      endpoints:\n        oslo_db:\n          hosts:\n            default:\n              host: mariadb\n          host_fqdn_override:\n            default: null\n    usage: |\n      {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n    return: |\n      mariadb\n*/}}\n\n{{- define \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" -}}\n{{- $type := index . 0 -}}\n{{- $endpoint := index . 1 -}}\n{{- $context := index . 
2 -}}\n{{- $endpointMap := index $context.Values.endpoints ( $type | replace \"-\" \"_\" ) }}\n{{- $endpointScheme := $endpointMap.scheme }}\n{{- $_ := set $context.Values \"__endpointHost\" ( index $endpointMap.hosts $endpoint | default $endpointMap.hosts.default ) }}\n{{- if kindIs \"map\" $context.Values.__endpointHost }}\n{{- $_ := set $context.Values \"__endpointHost\" ( index $context.Values.__endpointHost \"host\" ) }}\n{{- end }}\n{{- $endpointHost := $context.Values.__endpointHost }}\n{{- if regexMatch \"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\\\\.[0-9]+\" $endpointHost }}\n{{- printf \"%s\" $type -}}\n{{- else }}\n{{- $endpointHostname := printf \"%s\" $endpointHost }}\n{{- printf \"%s\" $endpointHostname -}}\n{{- end }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_keystone_endpoint_name_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Resolves the service name for a service type\nvalues: |\n  endpoints:\n    identity:\n      name: keystone\nusage: |\n  {{ tuple \"identity\" . | include \"helm-toolkit.endpoints.keystone_endpoint_name_lookup\" }}\nreturn: |\n  \"keystone\"\n*/}}\n\n{{- define \"helm-toolkit.endpoints.keystone_endpoint_name_lookup\" -}}\n{{- $type := index . 0 -}}\n{{- $context := index . 1 -}}\n{{- $endpointMap := index $context.Values.endpoints ( $type | replace \"-\" \"_\" ) }}\n{{- $endpointName := index $endpointMap \"name\" }}\n{{- $endpointName | quote -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_keystone_endpoint_path_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# FIXME(portdirect): it appears the port input here serves no purpose,\n# and should be removed. In addition this function is bugged, do we use it?\n\n{{/*\nabstract: |\n  Resolves the path for an endpoint\nvalues: |\n  endpoints:\n    cluster_domain_suffix: cluster.local\n    oslo_db:\n      path:\n       default: /dbname\n      port:\n        mysql:\n          default: 3306\nusage: |\n  {{ tuple \"oslo_db\" \"internal\" \"mysql\" . | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" }}\nreturn: |\n  /dbname\n*/}}\n\n{{- define \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" -}}\n{{- $type := index . 0 -}}\n{{- $endpoint := index . 1 -}}\n{{- $port := index . 2 -}}\n{{- $context := index . 3 -}}\n{{- $endpointMap := index $context.Values.endpoints ( $type | replace \"-\" \"_\" ) }}\n{{- if kindIs \"string\" $endpointMap.path }}\n{{- printf \"%s\" $endpointMap.path | default \"\" -}}\n{{- else -}}\n{{- $endpointPath := index $endpointMap.path $endpoint | default $endpointMap.path.default | default \"\" }}\n{{- printf \"%s\" $endpointPath -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_keystone_endpoint_scheme_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# FIXME(portdirect): it appears the port input here serves no purpose,\n# and should be removed. In addition this function is bugged, do we use it?\n\n{{/*\nabstract: |\n  Resolves the scheme for an endpoint\nvalues: |\n  endpoints:\n    cluster_domain_suffix: cluster.local\n    oslo_db:\n      scheme:\n        default:\n          mysql+pymysql\n      port:\n        mysql:\n          default: 3306\nusage: |\n  {{ tuple \"oslo_db\" \"internal\" \"mysql\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\nreturn: |\n  mysql+pymysql\n*/}}\n\n# This function returns the scheme for a service, it takes an tuple\n# input in the form: service-type, endpoint-class, port-name. eg:\n# { tuple \"etcd\" \"internal\" \"client\" . | include \"helm-toolkit.endpoints.keystone_scheme_lookup\" }\n# will return the scheme setting for this particular endpoint.  In other words, for most endpoints\n# it will return either 'http' or 'https'\n\n{{- define \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" -}}\n{{- $type := index . 0 -}}\n{{- $endpoint := index . 1 -}}\n{{- $port := index . 2 -}}\n{{- $context := index . 
3 -}}\n{{- $endpointMap := index $context.Values.endpoints ( $type | replace \"-\" \"_\" ) }}\n{{- if kindIs \"string\" $endpointMap.scheme }}\n{{- printf \"%s\" $endpointMap.scheme | default \"http\" -}}\n{{- else -}}\n{{- $endpointScheme := index $endpointMap.scheme $endpoint | default $endpointMap.scheme.default | default \"http\" }}\n{{- printf \"%s\" $endpointScheme -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_keystone_endpoint_uri_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  This function helps resolve uri style endpoints. It will omit the port for\n  http when 80 is used, and 443 in the case of https.\nvalues: |\n  endpoints:\n    cluster_domain_suffix: cluster.local\n    oslo_db:\n      hosts:\n        default: mariadb\n      host_fqdn_override:\n        default: null\n      path: /dbname\n      scheme: mysql+pymysql\n      port:\n        mysql:\n          default: 3306\nusage: |\n  {{ tuple \"oslo_db\" \"internal\" \"mysql\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\nreturn: |\n  mysql+pymysql://mariadb.default.svc.cluster.local:3306/dbname\n*/}}\n\n{{- define \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" -}}\n{{- $type := index . 0 -}}\n{{- $endpoint := index . 1 -}}\n{{- $port := index . 2 -}}\n{{- $context := index . 
3 -}}\n{{- $endpointScheme := tuple $type $endpoint $port $context | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\n{{- $endpointHost := tuple $type $endpoint $context | include \"helm-toolkit.endpoints.endpoint_host_lookup\" }}\n{{- $endpointPort := tuple $type $endpoint $port $context | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $endpointPath := tuple $type $endpoint $port $context | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" }}\n{{- if or ( and ( eq $endpointScheme \"http\" ) ( eq $endpointPort \"80\" ) ) ( and ( eq $endpointScheme \"https\" ) ( eq $endpointPort \"443\" ) ) -}}\n{{- printf \"%s://%s%s\" $endpointScheme $endpointHost $endpointPath -}}\n{{- else -}}\n{{- printf \"%s://%s:%s%s\" $endpointScheme $endpointHost $endpointPort $endpointPath -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/endpoints/_service_name_endpoint_with_namespace_lookup.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  This function returns endpoint \"<namespace>:<name>\" pair from an endpoint\n  definition. This is used in kubernetes-entrypoint to support dependencies\n  between different services in different namespaces.\n  returns: the endpoint namespace and the service name, delimited by a colon\n\n  Normally, the service name is constructed dynamically from the hostname\n  however when an ip address is used as the hostname, we default to\n  namespace:endpointCategoryName in order to construct a valid service name\n  however this can be overridden to a custom service name by defining\n  .service.name within the endpoint definition\nvalues: |\n  endpoints:\n    cluster_domain_suffix: cluster.local\n    oslo_db:\n      namespace: foo\n      hosts:\n        default: mariadb\n      host_fqdn_override:\n        default: null\nusage: |\n  {{ tuple oslo_db internal . | include \"helm-toolkit.endpoints.service_name_endpoint_with_namespace_lookup\" }}\nreturn: |\n  foo:mariadb\n*/}}\n\n{{- define \"helm-toolkit.endpoints.service_name_endpoint_with_namespace_lookup\" -}}\n{{- $type := index . 0 -}}\n{{- $endpoint := index . 1 -}}\n{{- $context := index . 
2 -}}\n{{- $typeYamlSafe := $type | replace \"-\" \"_\" }}\n{{- $endpointMap := index $context.Values.endpoints $typeYamlSafe }}\n{{- with $endpointMap -}}\n{{- $endpointName := index .hosts $endpoint | default .hosts.default }}\n{{- $endpointNamespace := .namespace | default $context.Release.Namespace }}\n{{- if regexMatch \"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\\\\.[0-9]+\" $endpointName }}\n{{- if .service.name }}\n{{- printf \"%s:%s\" $endpointNamespace .service.name -}}\n{{- else -}}\n{{- printf \"%s:%s\" $endpointNamespace $typeYamlSafe -}}\n{{- end -}}\n{{- else -}}\n{{- printf \"%s:%s\" $endpointNamespace $endpointName -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_ceph-storageclass.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Creates a manifest for kubernete ceph storageclass\nexamples:\n  - values: |\n      manifests:\n        storageclass: true\n      storageclass:\n        rbd:\n          provision_storage_class: true\n          provisioner: \"ceph.com/rbd\"\n          metadata:\n            default_storage_class: true\n            name: general\n          parameters:\n            #We will grab the monitors value based on helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\n            pool: rbd\n            admin_id: admin\n            ceph_configmap_name: \"ceph-etc\"\n            admin_secret_name: \"pvc-ceph-conf-combined-storageclass\"\n            admin_secret_namespace: ceph\n            user_id: admin\n            user_secret_name: \"pvc-ceph-client-key\"\n            image_format: \"2\"\n            image_features: layering\n        cephfs:\n          provision_storage_class: true\n          provisioner: \"ceph.com/cephfs\"\n          metadata:\n            name: cephfs\n          parameters:\n            admin_id: admin\n            admin_secret_name: \"pvc-ceph-cephfs-client-key\"\n            admin_secret_namespace: ceph\n    usage: |\n      {{- range $storageclass, $val := .Values.storageclass }}\n      {{ dict \"storageclass_data\" $val \"envAll\" $ | include \"helm-toolkit.manifests.ceph-storageclass\" }}\n      {{- end }}\n    return: |\n      ---\n      apiVersion: storage.k8s.io/v1\n 
     kind: StorageClass\n      metadata:\n        annotations:\n          storageclass.kubernetes.io/is-default-class: \"true\"\n        name: general\n      provisioner: ceph.com/rbd\n      parameters:\n        monitors: ceph-mon.<ceph-namespace>.svc.<k8s-domain-name>:6789\n        adminId: admin\n        adminSecretName: pvc-ceph-conf-combined-storageclass\n        adminSecretNamespace: ceph\n        pool: rbd\n        userId: admin\n        userSecretName: pvc-ceph-client-key\n        image_format: \"2\"\n        image_features: layering\n      ---\n      apiVersion: storage.k8s.io/v1\n      kind: StorageClass\n      metadata:\n        name: cephfs\n      provisioner: ceph.com/cephfs\n      parameters:\n        monitors: ceph-mon.<ceph-namespace>.svc.<k8s-domain-name>:6789\n        adminId: admin\n        adminSecretName: pvc-ceph-cephfs-client-key\n        adminSecretNamespace: ceph\n*/}}\n\n{{- define \"helm-toolkit.manifests.ceph-storageclass\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $monHost := $envAll.Values.conf.ceph.global.mon_host -}}\n{{- if empty $monHost -}}\n{{- $monHost = tuple \"ceph_mon\" \"internal\" \"mon\" $envAll | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" -}}\n{{- end -}}\n{{- $storageclassData := index . \"storageclass_data\" -}}\n---\n{{- if $storageclassData.provision_storage_class }}\napiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n{{- if $storageclassData.metadata.default_storage_class }}\n  annotations:\n    storageclass.kubernetes.io/is-default-class: \"true\"\n{{- end }}\n  name: {{ $storageclassData.metadata.name }}\nprovisioner: {{ $storageclassData.provisioner }}\nparameters:\n  monitors: {{ $monHost }}\n{{- range $attr, $value := $storageclassData.parameters }}\n  {{ $attr }}: {{ $value | quote }}\n{{- end }}\nallowVolumeExpansion: true\n\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_certificates.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Creates a certificate using jetstack\nexamples:\n  - values: |\n      endpoints:\n        dashboard:\n          host_fqdn_override:\n            default:\n              host: null\n              tls:\n                secretName: keystone-tls-api\n                issuerRef:\n                  name: ca-issuer\n                  duration: 2160h\n                  organization:\n                    - ACME\n                  commonName: keystone-api.openstack.svc.cluster.local\n                  privateKey:\n                    size: 2048\n                  usages:\n                    - server auth\n                    - client auth\n                  dnsNames:\n                    - cluster.local\n                  issuerRef:\n                    name: ca-issuer\n    usage: |\n      {{- $opts := dict \"envAll\" . 
\"service\" \"dashboard\" \"type\" \"internal\" -}}\n      {{ $opts | include \"helm-toolkit.manifests.certificates\" }}\n    return: |\n      ---\n      apiVersion: cert-manager.io/v1\n      kind: Certificate\n      metadata:\n        name: keystone-tls-api\n        namespace: NAMESPACE\n      spec:\n        commonName: keystone-api.openstack.svc.cluster.local\n        dnsNames:\n        - cluster.local\n        duration: 2160h\n        issuerRef:\n          name: ca-issuer\n        privateKey:\n          size: 2048\n        organization:\n        - ACME\n        secretName: keystone-tls-api\n        usages:\n        - server auth\n        - client auth\n*/}}\n\n{{- define \"helm-toolkit.manifests.certificates\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $service := index . \"service\" -}}\n{{- $type := index . \"type\" | default \"\" -}}\n{{- $slice := index $envAll.Values.endpoints $service \"host_fqdn_override\" \"default\" \"tls\" -}}\n{{/* Put in some sensible default value if one is not provided by values.yaml */}}\n{{/* If a dnsNames list is not in the values.yaml, it can be overridden by a passed-in parameter.\n  This allows user to use other HTK method to determine the URI and pass that into this method.*/}}\n{{- if not (hasKey $slice \"dnsNames\") -}}\n{{- $hostName := tuple $service $type $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" -}}\n{{- $dnsNames := list $hostName (printf \"%s.%s\" $hostName $envAll.Release.Namespace) (printf \"%s.%s.svc.%s\" $hostName $envAll.Release.Namespace $envAll.Values.endpoints.cluster_domain_suffix) -}}\n{{- $_ := $dnsNames | set (index $envAll.Values.endpoints $service \"host_fqdn_override\" \"default\" \"tls\") \"dnsNames\" -}}\n{{- end -}}\n{{/* Default privateKey size to 4096. This can be overridden. 
*/}}\n{{- if not (hasKey $slice \"privateKey\") -}}\n{{- $_ := dict \"size\" ( printf \"%d\" 4096 | atoi ) | set (index $envAll.Values.endpoints $service \"host_fqdn_override\" \"default\" \"tls\") \"privateKey\" -}}\n{{- else if empty (index $envAll.Values.endpoints $service \"host_fqdn_override\" \"default\" \"tls\" \"privateKey\" \"size\") -}}\n{{- $_ := ( printf \"%d\" 4096 | atoi ) | set (index $envAll.Values.endpoints $service \"host_fqdn_override\" \"default\" \"tls\" \"privateKey\") \"size\" -}}\n{{- end -}}\n{{/* Default duration to 3 months. Note the min is 720h. This can be overridden. */}}\n{{- if not (hasKey $slice \"duration\") -}}\n{{- $_ := printf \"%s\" \"2190h\" | set (index $envAll.Values.endpoints $service \"host_fqdn_override\" \"default\" \"tls\") \"duration\" -}}\n{{- end -}}\n{{/* Default renewBefore to 15 days. This can be overridden. */}}\n{{- if not (hasKey $slice \"renewBefore\") -}}\n{{- $_ := printf \"%s\" \"360h\" | set (index $envAll.Values.endpoints $service \"host_fqdn_override\" \"default\" \"tls\") \"renewBefore\" -}}\n{{- end -}}\n{{/* Default the usage to server auth and client auth. This can be overridden. */}}\n{{- if not (hasKey $slice \"usages\") -}}\n{{- $_ := (list \"server auth\" \"client auth\") | set (index $envAll.Values.endpoints $service \"host_fqdn_override\" \"default\" \"tls\") \"usages\" -}}\n{{- end -}}\n---\napiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\n  name: {{ index $envAll.Values.endpoints $service \"host_fqdn_override\" \"default\" \"tls\" \"secretName\" }}\n  namespace: {{ $envAll.Release.Namespace }}\nspec:\n{{ $slice | toYaml | indent 2 }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_configmap-oslo-policy.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n   http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{/*\nabstract: |\n  Renders out the Secret <service>-oslo-policy.\nvalues: |\n  conf:\n    policy.d:\n      file1:\n        foo: bar\n      file2:\n        foo: baz\nusage: |\n{{- include \"helm-toolkit.manifests.configmap_oslo_policy\" (dict \"envAll\" $envAll \"serviceName\" \"keystone\") }}\nreturn: |\n  ---\n  apiVersion: v1\n  kind: Secret\n  metadata:\n    name: keystone-oslo-policy\n  data:\n    file1: base64of(foo: bar)\n    file2: base64of(foo: baz)\n*/}}\n{{- define \"helm-toolkit.manifests.configmap_oslo_policy\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $serviceName := index . \"serviceName\" -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $serviceName }}-oslo-policy\ntype: Opaque\ndata:\n  {{- range $key, $value := index $envAll.Values.conf \"policy.d\" }}\n  {{- if $value }}\n  {{ $key }}: {{ toYaml $value | b64enc }}\n  {{- else }}\n  {{ $key }}: {{ \"\\n\" | b64enc }}\n  {{- end }}\n  {{- end }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_ingress.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Creates a manifest for a services ingress rules.\nexamples:\n  - values: |\n      network:\n        api:\n          ingress:\n            public: true\n            classes:\n              namespace: \"nginx\"\n              cluster: \"nginx-cluster\"\n            annotations:\n              nginx.ingress.kubernetes.io/rewrite-target: /\n      secrets:\n        tls:\n          key_manager:\n            api:\n              public: barbican-tls-public\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        key_manager:\n          name: barbican\n          hosts:\n            default: barbican-api\n            public: barbican\n          host_fqdn_override:\n            default: null\n            public:\n              host: barbican.openstackhelm.example\n              tls:\n                crt: |\n                  FOO-CRT\n                key: |\n                  FOO-KEY\n                ca: |\n                  FOO-CA_CRT\n          path:\n            default: /\n          scheme:\n            default: http\n            public: https\n          port:\n            api:\n              default: 9311\n              public: 80\n    usage: |\n      {{- include \"helm-toolkit.manifests.ingress\" ( dict \"envAll\" . 
\"backendServiceType\" \"key-manager\" \"backendPort\" \"b-api\" \"endpoint\" \"public\" \"pathType\" \"Prefix\" ) -}}\n    return: |\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: barbican\n        annotations:\n          nginx.ingress.kubernetes.io/rewrite-target: /\n\n      spec:\n        ingressClassName: \"nginx\"\n        rules:\n          - host: barbican\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: barbican-api\n                      port:\n                        name: b-api\n          - host: barbican.default\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: barbican-api\n                      port:\n                        name: b-api\n          - host: barbican.default.svc.cluster.local\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: barbican-api\n                      port:\n                        name: b-api\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: barbican-namespace-fqdn\n        annotations:\n          nginx.ingress.kubernetes.io/rewrite-target: /\n\n      spec:\n        ingressClassName: \"nginx\"\n        tls:\n          - secretName: barbican-tls-public\n            hosts:\n              - barbican.openstackhelm.example\n        rules:\n          - host: barbican.openstackhelm.example\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: barbican-api\n                     
 port:\n                        name: b-api\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: barbican-cluster-fqdn\n        annotations:\n          nginx.ingress.kubernetes.io/rewrite-target: /\n\n      spec:\n        ingressClassName: \"nginx-cluster\"\n        tls:\n          - secretName: barbican-tls-public\n            hosts:\n              - barbican.openstackhelm.example\n        rules:\n          - host: barbican.openstackhelm.example\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: barbican-api\n                      port:\n                        name: b-api\n  - values: |\n      network:\n        api:\n          ingress:\n            public: true\n            classes:\n              namespace: \"nginx\"\n              cluster: \"nginx-cluster\"\n            annotations:\n              nginx.ingress.kubernetes.io/rewrite-target: /\n      secrets:\n        tls:\n          key_manager:\n            api:\n              public: barbican-tls-public\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        key_manager:\n          name: barbican\n          hosts:\n            default: barbican-api\n            public:\n              host: barbican\n              tls:\n                crt: |\n                  FOO-CRT\n                key: |\n                  FOO-KEY\n                ca: |\n                  FOO-CA_CRT\n          host_fqdn_override:\n            default: null\n          path:\n            default: /\n          scheme:\n            default: http\n            public: https\n          port:\n            api:\n              default: 9311\n              public: 80\n    usage: |\n      {{- include \"helm-toolkit.manifests.ingress\" ( dict \"envAll\" . 
\"backendServiceType\" \"key-manager\" \"backendPort\" \"b-api\" \"endpoint\" \"public\" \"pathType\" \"Prefix\" ) -}}\n    return: |\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: barbican\n        annotations:\n          nginx.ingress.kubernetes.io/rewrite-target: /\n\n      spec:\n        ingressClassName: \"nginx\"\n        tls:\n          - secretName: barbican-tls-public\n            hosts:\n              - barbican\n              - barbican.default\n              - barbican.default.svc.cluster.local\n        rules:\n          - host: barbican\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: barbican-api\n                      port:\n                        name: b-api\n          - host: barbican.default\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: barbican-api\n                      port:\n                        name: b-api\n          - host: barbican.default.svc.cluster.local\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: barbican-api\n                      port:\n                        name: b-api\n  - values: |\n      cert_issuer_type: issuer\n      network:\n        api:\n          ingress:\n            public: true\n            classes:\n              namespace: \"nginx\"\n              cluster: \"nginx-cluster\"\n            annotations:\n              nginx.ingress.kubernetes.io/secure-backends: \"true\"\n              nginx.ingress.kubernetes.io/backend-protocol: \"https\"\n      secrets:\n        tls:\n          key_manager:\n            api:\n              public: 
barbican-tls-public\n              internal: barbican-tls-api\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        key_manager:\n          name: barbican\n          hosts:\n            default: barbican-api\n            public:\n              host: barbican\n              tls:\n                crt: |\n                  FOO-CRT\n                key: |\n                  FOO-KEY\n                ca: |\n                  FOO-CA_CRT\n          host_fqdn_override:\n            default: null\n          path:\n            default: /\n          scheme:\n            default: http\n            public: https\n          port:\n            api:\n              default: 9311\n              public: 80\n          certs:\n            barbican_tls_api:\n              secretName: barbican-tls-api\n              issuerRef:\n                name: ca-issuer\n                kind: Issuer\n    usage: |\n      {{- include \"helm-toolkit.manifests.ingress\" ( dict \"envAll\" . \"backendServiceType\" \"key-manager\" \"backendPort\" \"b-api\" \"endpoint\" \"public\" \"certIssuer\" \"ca-issuer\" \"pathType\" \"Prefix\" ) -}}\n    return: |\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: barbican\n        annotations:\n          cert-manager.io/issuer: ca-issuer\n          certmanager.k8s.io/issuer: ca-issuer\n          nginx.ingress.kubernetes.io/backend-protocol: https\n          nginx.ingress.kubernetes.io/secure-backends: \"true\"\n      spec:\n        ingressClassName: \"nginx\"\n        tls:\n          - secretName: barbican-tls-public-certmanager\n            hosts:\n              - barbican\n              - barbican.default\n              - barbican.default.svc.cluster.local\n        rules:\n          - host: barbican\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: 
barbican-api\n                      port:\n                        name: b-api\n          - host: barbican.default\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: barbican-api\n                      port:\n                        name: b-api\n          - host: barbican.default.svc.cluster.local\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: barbican-api\n                      port:\n                        name: b-api\n\n  - values: |\n      network:\n        api:\n          ingress:\n            public: true\n            classes:\n              namespace: \"nginx\"\n              cluster: \"nginx-cluster\"\n            annotations:\n              nginx.ingress.kubernetes.io/secure-backends: \"true\"\n              nginx.ingress.kubernetes.io/backend-protocol: \"https\"\n      secrets:\n        tls:\n          key_manager:\n            api:\n              public: barbican-tls-public\n              internal: barbican-tls-api\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        key_manager:\n          name: barbican\n          hosts:\n            default: barbican-api\n            public:\n              host: barbican\n              tls:\n                crt: |\n                  FOO-CRT\n                key: |\n                  FOO-KEY\n                ca: |\n                  FOO-CA_CRT\n          host_fqdn_override:\n            default: null\n          path:\n            default: /\n          scheme:\n            default: http\n            public: https\n          port:\n            api:\n              default: 9311\n              public: 80\n          certs:\n            barbican_tls_api:\n              secretName: barbican-tls-api\n              issuerRef:\n  
              name: ca-issuer\n                kind: ClusterIssuer\n    usage: |\n      {{- include \"helm-toolkit.manifests.ingress\" ( dict \"envAll\" . \"backendServiceType\" \"key-manager\" \"backendPort\" \"b-api\" \"endpoint\" \"public\" \"certIssuer\" \"ca-issuer\" \"pathType\" \"Prefix\" ) -}}\n    return: |\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: barbican\n        annotations:\n          cert-manager.io/cluster-issuer: ca-issuer\n          certmanager.k8s.io/cluster-issuer: ca-issuer\n          nginx.ingress.kubernetes.io/backend-protocol: https\n          nginx.ingress.kubernetes.io/secure-backends: \"true\"\n      spec:\n        ingressClassName: \"nginx\"\n        tls:\n          - secretName: barbican-tls-public-certmanager\n            hosts:\n              - barbican\n              - barbican.default\n              - barbican.default.svc.cluster.local\n        rules:\n          - host: barbican\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: barbican-api\n                      port:\n                        name: b-api\n          - host: barbican.default\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: barbican-api\n                      port:\n                        name: b-api\n          - host: barbican.default.svc.cluster.local\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: barbican-api\n                      port:\n                        name: b-api\n  # Sample usage for multiple DNS names associated with the same public\n  # endpoint and certificate\n  - 
values: |\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        grafana:\n          name: grafana\n          hosts:\n            default: grafana-dashboard\n            public: grafana\n          host_fqdn_override:\n            public:\n              host: grafana.openstackhelm.example\n              tls:\n                dnsNames:\n                  - grafana-alt.openstackhelm.example\n                crt: \"BASE64 ENCODED CERT\"\n                key: \"BASE64 ENCODED KEY\"\n      network:\n        grafana:\n          ingress:\n            classes:\n              namespace: \"nginx\"\n              cluster: \"nginx-cluster\"\n            annotations:\n              nginx.ingress.kubernetes.io/rewrite-target: /\n      secrets:\n        tls:\n          grafana:\n            grafana:\n              public: grafana-tls-public\n    usage: |\n      {{- $ingressOpts := dict \"envAll\" . \"backendService\" \"grafana\" \"backendServiceType\" \"grafana\" \"backendPort\" \"dashboard\" \"pathType\" \"Prefix\" -}}\n      {{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n    return: |\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: grafana\n        annotations:\n          nginx.ingress.kubernetes.io/rewrite-target: /\n\n      spec:\n        ingressClassName: \"nginx\"\n        rules:\n          - host: grafana\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: grafana-dashboard\n                      port:\n                        name: dashboard\n          - host: grafana.default\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: grafana-dashboard\n                      port:\n                        name: 
dashboard\n          - host: grafana.default.svc.cluster.local\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: grafana-dashboard\n                      port:\n                        name: dashboard\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: grafana-namespace-fqdn\n        annotations:\n          nginx.ingress.kubernetes.io/rewrite-target: /\n\n      spec:\n        ingressClassName: \"nginx\"\n        tls:\n          - secretName: grafana-tls-public\n            hosts:\n              - grafana.openstackhelm.example\n              - grafana-alt.openstackhelm.example\n        rules:\n          - host: grafana.openstackhelm.example\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: grafana-dashboard\n                      port:\n                        name: dashboard\n          - host: grafana-alt.openstackhelm.example\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: grafana-dashboard\n                      port:\n                        name: dashboard\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: grafana-cluster-fqdn\n        annotations:\n          nginx.ingress.kubernetes.io/rewrite-target: /\n\n      spec:\n        ingressClassName: \"nginx-cluster\"\n        tls:\n          - secretName: grafana-tls-public\n            hosts:\n              - grafana.openstackhelm.example\n              - grafana-alt.openstackhelm.example\n        rules:\n          - host: grafana.openstackhelm.example\n            http:\n              paths:\n 
               - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: grafana-dashboard\n                      port:\n                        name: dashboard\n          - host: grafana-alt.openstackhelm.example\n            http:\n              paths:\n                - path: /\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: grafana-dashboard\n                      port:\n                        name: dashboard\n  # Sample usage for custom ingressPaths (multiple paths)\n  - values: |\n      network:\n        api:\n          ingress:\n            public: true\n            classes:\n              namespace: \"nginx\"\n              cluster: \"nginx-cluster\"\n            annotations:\n              nginx.ingress.kubernetes.io/rewrite-target: /\n      secrets:\n        tls:\n          identity:\n            api:\n              public: keystone-tls-public\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        identity:\n          name: keystone\n          hosts:\n            default: keystone-api\n            public: keystone\n          host_fqdn_override:\n            default: null\n          path:\n            default: /v3\n          scheme:\n            default: http\n            public: https\n          port:\n            api:\n              default: 5000\n              public: 80\n    usage: |\n      {{- include \"helm-toolkit.manifests.ingress\" ( dict \"envAll\" . 
\"backendServiceType\" \"identity\" \"backendPort\" \"ks-pub\" \"endpoint\" \"public\" \"ingressPaths\" (list \"/v3\" \"/v2.0\") \"pathType\" \"Prefix\" ) -}}\n    return: |\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: keystone\n        annotations:\n          nginx.ingress.kubernetes.io/rewrite-target: /\n\n      spec:\n        ingressClassName: \"nginx\"\n        rules:\n          - host: keystone\n            http:\n              paths:\n                - path: /v3\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: keystone-api\n                      port:\n                        name: ks-pub\n                - path: /v2.0\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: keystone-api\n                      port:\n                        name: ks-pub\n          - host: keystone.default\n            http:\n              paths:\n                - path: /v3\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: keystone-api\n                      port:\n                        name: ks-pub\n                - path: /v2.0\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: keystone-api\n                      port:\n                        name: ks-pub\n          - host: keystone.default.svc.cluster.local\n            http:\n              paths:\n                - path: /v3\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: keystone-api\n                      port:\n                        name: ks-pub\n                - path: /v2.0\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: 
keystone-api\n                      port:\n                        name: ks-pub\n  # Sample usage for additionalBackends (multiple backends with different paths)\n  - values: |\n      network:\n        api:\n          ingress:\n            public: true\n            classes:\n              namespace: \"nginx\"\n              cluster: \"nginx-cluster\"\n            annotations:\n              nginx.ingress.kubernetes.io/rewrite-target: /\n      secrets:\n        tls:\n          shipyard:\n            api:\n              public: shipyard-tls-public\n      endpoints:\n        cluster_domain_suffix: cluster.local\n        shipyard:\n          name: shipyard\n          hosts:\n            default: shipyard-int\n            public: shipyard\n          host_fqdn_override:\n            default: null\n          path:\n            default: /api/v1.0\n          scheme:\n            default: http\n            public: https\n          port:\n            api:\n              default: 9000\n              public: 80\n        airflow_web:\n          name: airflow-web\n          hosts:\n            default: airflow-web-int\n            public: airflow-web\n          host_fqdn_override:\n            default: null\n          path:\n            default: /airflow\n          scheme:\n            default: http\n          port:\n            api:\n              default: 8080\n    usage: |\n      {{- include \"helm-toolkit.manifests.ingress\" ( dict \"envAll\" . 
\"backendServiceType\" \"shipyard\" \"backendPort\" \"api\" \"endpoint\" \"public\" \"pathType\" \"Prefix\" \"additionalBackends\" (list (dict \"backendServiceType\" \"airflow-web\" \"backendPort\" \"api\")) ) -}}\n    return: |\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: shipyard\n        annotations:\n          nginx.ingress.kubernetes.io/rewrite-target: /\n\n      spec:\n        ingressClassName: \"nginx\"\n        rules:\n          - host: shipyard\n            http:\n              paths:\n                - path: /api/v1.0\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: shipyard-int\n                      port:\n                        name: api\n                - path: /airflow\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: airflow-web-int\n                      port:\n                        name: api\n          - host: shipyard.default\n            http:\n              paths:\n                - path: /api/v1.0\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: shipyard-int\n                      port:\n                        name: api\n                - path: /airflow\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: airflow-web-int\n                      port:\n                        name: api\n          - host: shipyard.default.svc.cluster.local\n            http:\n              paths:\n                - path: /api/v1.0\n                  pathType: Prefix\n                  backend:\n                    service:\n                      name: shipyard-int\n                      port:\n                        name: api\n                - path: /airflow\n                  pathType: Prefix\n                  
backend:\n                    service:\n                      name: airflow-web-int\n                      port:\n                        name: api\n\n*/}}\n\n{{- define \"helm-toolkit.manifests.ingress._host_rules\" -}}\n{{- $vHost := index . \"vHost\" -}}\n{{- $backendName := index . \"backendName\" -}}\n{{- $backendPort := index . \"backendPort\" -}}\n{{- $pathType := index . \"pathType\" -}}\n{{- $ingressPaths := index . \"ingressPaths\" | default \"/\" -}}\n{{- if kindIs \"string\" $ingressPaths -}}\n{{-   $ingressPaths = list $ingressPaths -}}\n{{- end -}}\n{{- $additionalBackends := index . \"additionalBackends\" | default list -}}\n- host: {{ $vHost }}\n  http:\n    paths:\n{{- range $p := $ingressPaths }}\n{{- if kindIs \"map\" $p }}\n      - path: {{ $p.path }}\n        pathType: {{ $p.pathType | default $pathType }}\n{{- else }}\n      - path: {{ $p }}\n        pathType: {{ $pathType }}\n{{- end }}\n        backend:\n          service:\n            name: {{ $backendName }}\n            port:\n{{- if or (kindIs \"int\" $backendPort) (regexMatch \"^[0-9]{1,5}$\" $backendPort) }}\n              number: {{ $backendPort | int }}\n{{- else }}\n              name: {{ $backendPort | quote }}\n{{- end }}\n{{- end }}\n{{- range $ab := $additionalBackends }}\n{{- $abPaths := $ab.ingressPaths | default \"/\" -}}\n{{- if kindIs \"string\" $abPaths -}}\n{{-   $abPaths = list $abPaths -}}\n{{- end -}}\n{{- range $p := $abPaths }}\n{{- if kindIs \"map\" $p }}\n      - path: {{ $p.path }}\n        pathType: {{ $p.pathType | default $pathType }}\n{{- else }}\n      - path: {{ $p }}\n        pathType: {{ $pathType }}\n{{- end }}\n        backend:\n          service:\n            name: {{ $ab.backendName }}\n            port:\n{{- if or (kindIs \"int\" $ab.backendPort) (regexMatch \"^[0-9]{1,5}$\" $ab.backendPort) }}\n              number: {{ $ab.backendPort | int }}\n{{- else }}\n              name: {{ $ab.backendPort | quote }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end 
}}\n\n{{- define \"helm-toolkit.manifests.ingress\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $backendService := index . \"backendService\" | default \"api\" -}}\n{{- $backendServiceType := index . \"backendServiceType\" -}}\n{{- $backendPort := index . \"backendPort\" -}}\n{{- $endpoint := index . \"endpoint\" | default \"public\" -}}\n{{- $pathType := index . \"pathType\" | default \"Prefix\" -}}\n{{- $certIssuer := index . \"certIssuer\" | default \"\" -}}\n{{- $ingressPaths := index . \"ingressPaths\" | default \"/\" -}}\n{{- $additionalBackendsInput := index . \"additionalBackends\" | default list -}}\n{{- $additionalBackends := list -}}\n{{- range $ab := $additionalBackendsInput -}}\n{{-   $abServiceType := $ab.backendServiceType -}}\n{{-   $abBackendName := tuple $abServiceType \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" -}}\n{{-   $abBackendPort := $ab.backendPort -}}\n{{-   $abPaths := \"\" -}}\n{{-   $abEndpointMap := index $envAll.Values.endpoints ( $abServiceType | replace \"-\" \"_\" ) -}}\n{{-   if hasKey $abEndpointMap \"path\" -}}\n{{-     if kindIs \"string\" $abEndpointMap.path -}}\n{{-       $abPaths = $abEndpointMap.path -}}\n{{-     else if kindIs \"map\" $abEndpointMap.path -}}\n{{-       if hasKey $abEndpointMap.path \"default\" -}}\n{{-         $abPaths = index $abEndpointMap.path \"default\" -}}\n{{-       end -}}\n{{-     end -}}\n{{-   end -}}\n{{-   if not $abPaths -}}\n{{-     $abPaths = \"/\" -}}\n{{-   end -}}\n{{-   $additionalBackends = append $additionalBackends (dict \"backendName\" $abBackendName \"backendPort\" $abBackendPort \"ingressPaths\" $abPaths) -}}\n{{- end -}}\n{{- $ingressName := tuple $backendServiceType $endpoint $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n{{- $backendName := tuple $backendServiceType \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n{{- $hostName := tuple 
$backendServiceType $endpoint $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n{{- $hostNameFull := tuple $backendServiceType $endpoint $envAll | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n{{- $certIssuerType := \"cluster-issuer\" -}}\n{{- if $envAll.Values.cert_issuer_type }}\n{{- $certIssuerType = $envAll.Values.cert_issuer_type }}\n{{- end }}\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: {{ $ingressName }}\n  annotations:\n{{- if $certIssuer }}\n    cert-manager.io/{{ $certIssuerType }}: {{ $certIssuer }}\n    certmanager.k8s.io/{{ $certIssuerType }}: {{ $certIssuer }}\n{{- $slice := index $envAll.Values.endpoints $backendServiceType \"host_fqdn_override\" \"default\" \"tls\" -}}\n{{- if (hasKey $slice \"duration\") }}\n    cert-manager.io/duration: {{ index $slice \"duration\" }}\n{{- end }}\n{{- end }}\n{{ toYaml (index $envAll.Values.network $backendService \"ingress\" \"annotations\") | indent 4 }}\nspec:\n  ingressClassName: {{ index $envAll.Values.network $backendService \"ingress\" \"classes\" \"namespace\" | quote }}\n{{- $host := index $envAll.Values.endpoints ( $backendServiceType | replace \"-\" \"_\" ) \"hosts\" }}\n{{- if $certIssuer }}\n{{- $secretName := index $envAll.Values.secrets \"tls\" ( $backendServiceType | replace \"-\" \"_\" ) $backendService $endpoint }}\n{{- $_ := required \"You need to specify a secret in your values for the endpoint\" $secretName }}\n  tls:\n    - secretName: {{ printf \"%s-ing\" $secretName }}\n      hosts:\n{{- range $key1, $vHost := tuple $hostName (printf \"%s.%s\" $hostName $envAll.Release.Namespace) (printf \"%s.%s.svc.%s\" $hostName $envAll.Release.Namespace $envAll.Values.endpoints.cluster_domain_suffix) }}\n        - {{ $vHost }}\n{{- end }}\n{{- else }}\n{{- if hasKey $host $endpoint }}\n{{- $endpointHost := index $host $endpoint }}\n{{- if kindIs \"map\" $endpointHost }}\n{{- if hasKey $endpointHost \"tls\" }}\n{{- if and ( 
not ( empty $endpointHost.tls.key ) ) ( not ( empty $endpointHost.tls.crt ) ) }}\n{{- $secretName := index $envAll.Values.secrets \"tls\" ( $backendServiceType | replace \"-\" \"_\" ) $backendService $endpoint }}\n{{- $_ := required \"You need to specify a secret in your values for the endpoint\" $secretName }}\n  tls:\n    - secretName: {{ $secretName }}\n      hosts:\n{{- range $key1, $vHost := tuple $hostName (printf \"%s.%s\" $hostName $envAll.Release.Namespace) (printf \"%s.%s.svc.%s\" $hostName $envAll.Release.Namespace $envAll.Values.endpoints.cluster_domain_suffix) }}\n        - {{ $vHost }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n  rules:\n{{- range $key1, $vHost := tuple $hostName (printf \"%s.%s\" $hostName $envAll.Release.Namespace) (printf \"%s.%s.svc.%s\" $hostName $envAll.Release.Namespace $envAll.Values.endpoints.cluster_domain_suffix) }}\n{{- $hostRules := dict \"vHost\" $vHost \"backendName\" $backendName \"backendPort\" $backendPort \"pathType\" $pathType \"ingressPaths\" $ingressPaths \"additionalBackends\" $additionalBackends }}\n{{ $hostRules | include \"helm-toolkit.manifests.ingress._host_rules\" | indent 4 }}\n{{- end }}\n{{- if not ( hasSuffix ( printf \".%s.svc.%s\" $envAll.Release.Namespace $envAll.Values.endpoints.cluster_domain_suffix) $hostNameFull) }}\n{{- $ingressConf := $envAll.Values.network -}}\n{{- $ingressClasses := ternary (tuple \"namespace\") (tuple \"namespace\" \"cluster\") (and (hasKey $ingressConf \"use_external_ingress_controller\") $ingressConf.use_external_ingress_controller) }}\n{{- range $key2, $ingressController := $ingressClasses }}\n{{- $vHosts := list $hostNameFull }}\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: {{ printf \"%s-%s-%s\" $ingressName $ingressController \"fqdn\" }}\n  annotations:\n{{ toYaml (index $envAll.Values.network $backendService \"ingress\" \"annotations\") | indent 4 }}\nspec:\n  ingressClassName: {{ index $envAll.Values.network 
$backendService \"ingress\" \"classes\" $ingressController | quote }}\n{{- $host := index $envAll.Values.endpoints ( $backendServiceType | replace \"-\" \"_\" ) \"host_fqdn_override\" }}\n{{- if hasKey $host $endpoint }}\n{{- $endpointHost := index $host $endpoint }}\n{{- if kindIs \"map\" $endpointHost }}\n{{- if hasKey $endpointHost \"tls\" }}\n{{- range $v := without (index $endpointHost.tls \"dnsNames\" | default list) $hostNameFull }}\n{{- $vHosts = append $vHosts $v }}\n{{- end }}\n{{- if hasKey $envAll.Values.endpoints \"alias_fqdn\" }}\n{{- $alias_host := $envAll.Values.endpoints.alias_fqdn }}\n{{- $vHosts = append $vHosts $alias_host }}\n{{- end }}\n{{- $secretName := index $envAll.Values.secrets \"tls\" ( $backendServiceType | replace \"-\" \"_\" ) $backendService $endpoint }}\n{{- $_ := required \"You need to specify a secret in your values for the endpoint\" $secretName }}\n  tls:\n    - secretName: {{ $secretName }}\n      hosts:\n{{- range $vHost := $vHosts }}\n        - {{ $vHost }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n  rules:\n{{- range $vHost := $vHosts }}\n{{- $hostNameFullRules := dict \"vHost\" $vHost \"backendName\" $backendName \"backendPort\" $backendPort \"pathType\" $pathType \"ingressPaths\" $ingressPaths \"additionalBackends\" $additionalBackends }}\n{{ $hostNameFullRules | include \"helm-toolkit.manifests.ingress._host_rules\" | indent 4 }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_job-bootstrap.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# This function creates a manifest for db creation and user management.\n# It can be used in charts dict created similar to the following:\n# {- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"senlin\" -}\n# { $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }\n\n{{- define \"helm-toolkit.manifests.job_bootstrap\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $serviceName := index . \"serviceName\" -}}\n{{- $jobNameRef := printf \"%s_%s\" $serviceName \"bootstrap\" -}}\n{{- $jobAnnotations := index . \"jobAnnotations\" -}}\n{{- $jobLabels := index . \"jobLabels\" -}}\n{{- $nodeSelector := index . \"nodeSelector\" | default ( dict $envAll.Values.labels.job.node_selector_key $envAll.Values.labels.job.node_selector_value ) -}}\n{{- $tolerationsEnabled := index . \"tolerationsEnabled\" | default false -}}\n{{- $podVolMounts := index . \"podVolMounts\" | default (dig $jobNameRef $jobNameRef \"volumeMounts\" false $envAll.Values.pod.mounts) -}}\n{{- $podVols := index . \"podVols\" | default (dig $jobNameRef $jobNameRef \"volumes\" false $envAll.Values.pod.mounts) -}}\n{{- $configMapBin := index . \"configMapBin\" | default (printf \"%s-%s\" $serviceName \"bin\" ) -}}\n{{- $configMapEtc := index . \"configMapEtc\" | default (printf \"%s-%s\" $serviceName \"etc\" ) -}}\n{{- $configFile := index . 
\"configFile\" | default (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) -}}\n{{- $logConfigFile := index . \"logConfigFile\" | default (printf \"/etc/%s/logging.conf\" $serviceName ) -}}\n{{- $tlsSecret := index . \"tlsSecret\" | default \"\" -}}\n{{- $keystoneUser := index . \"keystoneUser\" | default $serviceName -}}\n{{- $openrc := index . \"openrc\" | default \"true\" -}}\n{{- $secretBin := index . \"secretBin\" -}}\n{{- $backoffLimit := index . \"backoffLimit\" | default \"1000\" -}}\n{{- $activeDeadlineSeconds := index . \"activeDeadlineSeconds\" -}}\n{{- $serviceNamePretty := $serviceName | replace \"_\" \"-\" -}}\n\n{{- $serviceAccountName := printf \"%s-%s\" $serviceNamePretty \"bootstrap\" }}\n{{ tuple $envAll \"bootstrap\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceNamePretty \"bootstrap\" | quote }}\n  labels:\n{{ tuple $envAll $serviceName \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 4 }}\n{{- end }}\n  annotations:\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 -}}\n{{- if $jobAnnotations }}\n{{ toYaml $jobAnnotations | indent 4 }}\n{{- end }}\nspec:\n  backoffLimit: {{ $backoffLimit }}\n{{- if $activeDeadlineSeconds }}\n  activeDeadlineSeconds: {{ $activeDeadlineSeconds }}\n{{- end }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll $serviceName \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 8 }}\n{{- end }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n{{ tuple \"bootstrap\" $envAll | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple 
\"bootstrap\" $envAll | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      {{ tuple $envAll \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_image_pull_secrets\" | indent 6 }}\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n{{- if $tolerationsEnabled }}\n{{ tuple $envAll $serviceName | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end}}\n      initContainers:\n{{ tuple $envAll \"bootstrap\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: bootstrap\n          image: {{ $envAll.Values.images.tags.bootstrap }}\n          imagePullPolicy: {{ $envAll.Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{- if eq $openrc \"true\" }}\n          env:\n{{- with $env := dict \"ksUserSecret\" ( index $envAll.Values.secrets.identity $keystoneUser ) \"useCA\" (ne $tlsSecret \"\") }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n{{- end }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/bootstrap.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: bootstrap-sh\n              mountPath: /tmp/bootstrap.sh\n              subPath: bootstrap.sh\n              readOnly: true\n            - name: etc-service\n              mountPath: {{ dir $configFile | quote }}\n            - name: bootstrap-conf\n              mountPath: {{ $configFile | quote }}\n              subPath: {{ base $configFile | quote }}\n              readOnly: true\n            - name: bootstrap-conf\n              mountPath: {{ $logConfigFile | quote }}\n              subPath: {{ base $logConfigFile | quote }}\n          
    readOnly: true\n{{ dict \"enabled\" (ne $tlsSecret \"\") \"name\" $tlsSecret | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- if $podVolMounts }}\n{{ $podVolMounts | toYaml | indent 12 }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: bootstrap-sh\n{{- if $secretBin }}\n          secret:\n            secretName: {{ $secretBin | quote }}\n            defaultMode: 0555\n{{- else }}\n          configMap:\n            name: {{ $configMapBin | quote }}\n            defaultMode: 0555\n{{- end }}\n        - name: etc-service\n          emptyDir: {}\n        - name: bootstrap-conf\n          secret:\n            secretName: {{ $configMapEtc | quote }}\n            defaultMode: 0444\n{{- dict \"enabled\" (ne $tlsSecret \"\") \"name\" $tlsSecret | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- if $podVols }}\n{{ $podVols | toYaml | indent 8 }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_job-db-drop-mysql.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# This function creates a manifest for db creation and user management.\n# It can be used in charts dict created similar to the following:\n# {- $dbToDropJob := dict \"envAll\" . \"serviceName\" \"senlin\" -}\n# { $dbToDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }\n#\n# If the service does not use oslo then the db can be managed with:\n# {- $dbToDrop := dict \"inputType\" \"secret\" \"adminSecret\" .Values.secrets.oslo_db.admin \"userSecret\" .Values.secrets.oslo_db.horizon -}\n# {- $dbToDropJob := dict \"envAll\" . \"serviceName\" \"horizon\" \"dbToDrop\" $dbToDrop -}\n# { $dbToDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }\n\n{{- define \"helm-toolkit.manifests.job_db_drop_mysql\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $serviceName := index . \"serviceName\" -}}\n{{- $jobAnnotations := index . \"jobAnnotations\" -}}\n{{- $jobLabels := index . \"jobLabels\" -}}\n{{- $nodeSelector := index . \"nodeSelector\" | default ( dict $envAll.Values.labels.job.node_selector_key $envAll.Values.labels.job.node_selector_value ) -}}\n{{- $tolerationsEnabled := index . \"tolerationsEnabled\" | default false -}}\n{{- $configMapBin := index . \"configMapBin\" | default (printf \"%s-%s\" $serviceName \"bin\" ) -}}\n{{- $configMapEtc := index . \"configMapEtc\" | default (printf \"%s-%s\" $serviceName \"etc\" ) -}}\n{{- $dbToDrop := index . 
\"dbToDrop\" | default ( dict \"adminSecret\" $envAll.Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"database\" \"configDbKey\" \"connection\" ) -}}\n{{- $dbsToDrop := default (list $dbToDrop) (index . \"dbsToDrop\") }}\n{{- $secretBin := index . \"secretBin\" -}}\n{{- $backoffLimit := index . \"backoffLimit\" | default \"1000\" -}}\n{{- $activeDeadlineSeconds := index . \"activeDeadlineSeconds\" -}}\n{{- $serviceNamePretty := $serviceName | replace \"_\" \"-\" -}}\n{{- $dbAdminTlsSecret := index . \"dbAdminTlsSecret\" | default \"\" -}}\n\n{{- $serviceAccountName := printf \"%s-%s\" $serviceNamePretty \"db-drop\" }}\n{{ tuple $envAll \"db_drop\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceNamePretty \"db-drop\" | quote }}\n  labels:\n{{ tuple $envAll $serviceName \"db-drop\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 4 }}\n{{- end }}\n  annotations:\n    \"helm.sh/hook\": pre-delete\n    \"helm.sh/hook-delete-policy\": hook-succeeded\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 -}}\n{{- if $jobAnnotations }}\n{{ toYaml $jobAnnotations | indent 4 }}\n{{- end }}\nspec:\n  backoffLimit: {{ $backoffLimit }}\n{{- if $activeDeadlineSeconds }}\n  activeDeadlineSeconds: {{ $activeDeadlineSeconds }}\n{{- end }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll $serviceName \"db-drop\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 8 }}\n{{- end }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      {{ tuple 
$envAll \"db_drop\" | include \"helm-toolkit.snippets.kubernetes_image_pull_secrets\" | indent 6 }}\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n{{- if $tolerationsEnabled }}\n{{ tuple $envAll $serviceName | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end}}\n      initContainers:\n{{ tuple $envAll \"db_drop\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n{{- range $key1, $dbToDrop := $dbsToDrop }}\n{{ $dbToDropType := default \"oslo\" $dbToDrop.inputType }}\n        - name: {{ printf \"%s-%s-%d\" $serviceNamePretty \"db-drop\" $key1 | quote }}\n          image: {{ $envAll.Values.images.tags.db_drop }}\n          imagePullPolicy: {{ $envAll.Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_drop | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: ROOT_DB_CONNECTION\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $dbToDrop.adminSecret | quote }}\n                  key: DB_CONNECTION\n{{- if eq $dbToDropType \"oslo\" }}\n            - name: OPENSTACK_CONFIG_FILE\n              value: {{ $dbToDrop.configFile | quote }}\n            - name: OPENSTACK_CONFIG_DB_SECTION\n              value: {{ $dbToDrop.configDbSection | quote }}\n            - name: OPENSTACK_CONFIG_DB_KEY\n              value: {{ $dbToDrop.configDbKey | quote }}\n{{- end }}\n{{- if $envAll.Values.manifests.certificates }}\n            - name: MARIADB_X509\n              value: \"REQUIRE X509\"\n{{- end }}\n{{- if eq $dbToDropType \"secret\" }}\n            - name: DB_CONNECTION\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $dbToDrop.userSecret | quote }}\n                  key: DB_CONNECTION\n{{- end }}\n          command:\n            - /tmp/db-drop.py\n          volumeMounts:\n            - name: pod-tmp\n              
mountPath: /tmp\n            - name: db-drop-sh\n              mountPath: /tmp/db-drop.py\n              subPath: db-drop.py\n              readOnly: true\n\n{{- if eq $dbToDropType \"oslo\" }}\n            - name: etc-service\n              mountPath: {{ dir $dbToDrop.configFile | quote }}\n            - name: db-drop-conf\n              mountPath: {{ $dbToDrop.configFile | quote }}\n              subPath: {{ base $dbToDrop.configFile | quote }}\n              readOnly: true\n            - name: db-drop-conf\n              mountPath: {{ $dbToDrop.logConfigFile | quote }}\n              subPath: {{ base $dbToDrop.logConfigFile | quote }}\n              readOnly: true\n{{- end }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $dbAdminTlsSecret \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- end }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: db-drop-sh\n{{- if $secretBin }}\n          secret:\n            secretName: {{ $secretBin | quote }}\n            defaultMode: 0555\n{{- else }}\n          configMap:\n            name: {{ $configMapBin | quote }}\n            defaultMode: 0555\n{{- end }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $dbAdminTlsSecret | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n{{- $local := dict \"configMapBinFirst\" true -}}\n{{- range $key1, $dbToDrop := $dbsToDrop }}\n{{- $dbToDropType := default \"oslo\" $dbToDrop.inputType }}\n{{- if and (eq $dbToDropType \"oslo\") $local.configMapBinFirst }}\n{{- $_ := set $local \"configMapBinFirst\" false }}\n        - name: etc-service\n          emptyDir: {}\n        - name: db-drop-conf\n          secret:\n            secretName: {{ $configMapEtc | quote }}\n            defaultMode: 0444\n{{- end -}}\n{{- end -}}\n{{- end 
-}}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_job-db-init-mysql.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# This function creates a manifest for db creation and user management.\n# It can be used in charts dict created similar to the following:\n# {- $dbToInitJob := dict \"envAll\" . \"serviceName\" \"senlin\" -}\n# { $dbToInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }\n#\n# If the service does not use oslo then the db can be managed with:\n# {- $dbToInit := dict \"inputType\" \"secret\" \"adminSecret\" .Values.secrets.oslo_db.admin \"userSecret\" .Values.secrets.oslo_db.horizon -}\n# {- $dbToInitJob := dict \"envAll\" . \"serviceName\" \"horizon\" \"dbToInit\" $dbToInit -}\n# { $dbToInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }\n\n{{- define \"helm-toolkit.manifests.job_db_init_mysql\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $serviceName := index . \"serviceName\" -}}\n{{- $jobAnnotations := index . \"jobAnnotations\" -}}\n{{- $jobLabels := index . \"jobLabels\" -}}\n{{- $nodeSelector := index . \"nodeSelector\" | default ( dict $envAll.Values.labels.job.node_selector_key $envAll.Values.labels.job.node_selector_value ) -}}\n{{- $tolerationsEnabled := index . \"tolerationsEnabled\" | default false -}}\n{{- $configMapBin := index . \"configMapBin\" | default (printf \"%s-%s\" $serviceName \"bin\" ) -}}\n{{- $configMapEtc := index . \"configMapEtc\" | default (printf \"%s-%s\" $serviceName \"etc\" ) -}}\n{{- $dbToInit := index . 
\"dbToInit\" | default ( dict \"adminSecret\" $envAll.Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"database\" \"configDbKey\" \"connection\" ) -}}\n{{- $dbsToInit := default (list $dbToInit) (index . \"dbsToInit\") }}\n{{- $secretBin := index . \"secretBin\" -}}\n{{- $backoffLimit := index . \"backoffLimit\" | default \"1000\" -}}\n{{- $activeDeadlineSeconds := index . \"activeDeadlineSeconds\" -}}\n{{- $serviceNamePretty := $serviceName | replace \"_\" \"-\" -}}\n{{- $dbAdminTlsSecret := index . \"dbAdminTlsSecret\" | default \"\" -}}\n\n{{- $serviceAccountName := printf \"%s-%s\" $serviceNamePretty \"db-init\" }}\n{{ tuple $envAll \"db_init\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceNamePretty \"db-init\" | quote }}\n  labels:\n{{ tuple $envAll $serviceName \"db-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 4 }}\n{{- end }}\n  annotations:\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 -}}\n{{- if $jobAnnotations }}\n{{ toYaml $jobAnnotations | indent 4 }}\n{{- end }}\nspec:\n  backoffLimit: {{ $backoffLimit }}\n{{- if $activeDeadlineSeconds }}\n  activeDeadlineSeconds: {{ $activeDeadlineSeconds }}\n{{- end }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll $serviceName \"db-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 8 }}\n{{- end }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n{{ tuple \"db_init\" $envAll | include 
\"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"db_init\" $envAll | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      {{ tuple $envAll \"db_init\" | include \"helm-toolkit.snippets.kubernetes_image_pull_secrets\" | indent 6 }}\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n{{- if $tolerationsEnabled }}\n{{ tuple $envAll $serviceName | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end}}\n      initContainers:\n{{ tuple $envAll \"db_init\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n{{- range $key1, $dbToInit := $dbsToInit }}\n{{ $dbToInitType := default \"oslo\" $dbToInit.inputType }}\n        - name: {{ printf \"%s-%s-%d\" $serviceNamePretty \"db-init\" $key1 | quote }}\n          image: {{ $envAll.Values.images.tags.db_init }}\n          imagePullPolicy: {{ $envAll.Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_init | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: ROOT_DB_CONNECTION\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $dbToInit.adminSecret | quote }}\n                  key: DB_CONNECTION\n{{- if eq $dbToInitType \"oslo\" }}\n            - name: OPENSTACK_CONFIG_FILE\n              value: {{ $dbToInit.configFile | quote }}\n            - name: OPENSTACK_CONFIG_DB_SECTION\n              value: {{ $dbToInit.configDbSection | quote }}\n            - name: OPENSTACK_CONFIG_DB_KEY\n              value: {{ $dbToInit.configDbKey | quote }}\n{{- end }}\n{{- if eq $dbToInitType \"secret\" }}\n            - name: DB_CONNECTION\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $dbToInit.userSecret | quote }}\n                  key: 
DB_CONNECTION\n{{- end }}\n{{- if $envAll.Values.manifests.certificates }}\n            - name: MARIADB_X509\n              value: \"REQUIRE X509\"\n{{- end }}\n          command:\n            - /tmp/db-init.py\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: db-init-sh\n              mountPath: /tmp/db-init.py\n              subPath: db-init.py\n              readOnly: true\n{{- if eq $dbToInitType \"oslo\" }}\n            - name: etc-service\n              mountPath: {{ dir $dbToInit.configFile | quote }}\n            - name: db-init-conf\n              mountPath: {{ $dbToInit.configFile | quote }}\n              subPath: {{ base $dbToInit.configFile | quote }}\n              readOnly: true\n            - name: db-init-conf\n              mountPath: {{ $dbToInit.logConfigFile | quote }}\n              subPath: {{ base $dbToInit.logConfigFile | quote }}\n              readOnly: true\n{{- end }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $dbAdminTlsSecret \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- end }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: db-init-sh\n{{- if $secretBin }}\n          secret:\n            secretName: {{ $secretBin | quote }}\n            defaultMode: 0555\n{{- else }}\n          configMap:\n            name: {{ $configMapBin | quote }}\n            defaultMode: 0555\n{{- end }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $dbAdminTlsSecret | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n{{- $local := dict \"configMapBinFirst\" true -}}\n{{- range $key1, $dbToInit := $dbsToInit }}\n{{- $dbToInitType := default \"oslo\" $dbToInit.inputType }}\n{{- if and (eq $dbToInitType \"oslo\") $local.configMapBinFirst 
}}\n{{- $_ := set $local \"configMapBinFirst\" false }}\n        - name: etc-service\n          emptyDir: {}\n        - name: db-init-conf\n          secret:\n            secretName: {{ $configMapEtc | quote }}\n            defaultMode: 0444\n{{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_job-db-sync.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# This function creates a manifest for db migration and management.\n# It can be used in charts dict created similar to the following:\n# {- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"senlin\" -}\n# { $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }\n\n{{- define \"helm-toolkit.manifests.job_db_sync\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $serviceName := index . \"serviceName\" -}}\n{{- $jobNameRef := printf \"%s_%s\" $serviceName \"db_sync\" -}}\n{{- $jobAnnotations := index . \"jobAnnotations\" -}}\n{{- $jobLabels := index . \"jobLabels\" -}}\n{{- $nodeSelector := index . \"nodeSelector\" | default ( dict $envAll.Values.labels.job.node_selector_key $envAll.Values.labels.job.node_selector_value ) -}}\n{{- $tolerationsEnabled := index . \"tolerationsEnabled\" | default false -}}\n{{- $configMapBin := index . \"configMapBin\" | default (printf \"%s-%s\" $serviceName \"bin\" ) -}}\n{{- $configMapEtc := index . \"configMapEtc\" | default (printf \"%s-%s\" $serviceName \"etc\" ) -}}\n{{- $podMount := index (index $envAll.Values.pod.mounts $jobNameRef | default dict) $jobNameRef | default dict -}}\n{{- $podVolMounts := (concat ((index $podMount \"volumeMounts\" | default list)) ((index . \"podVolMounts\") | default (list))) | uniq -}}\n{{- $podVols := (concat ((index $podMount \"volumes\" | default list)) ((index . 
\"podVols\") | default (list))) | uniq -}}\n{{- $podEnvVars := index . \"podEnvVars\" | default false -}}\n{{- $dbToSync := index . \"dbToSync\" | default ( dict \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"configDir\" (printf \"/etc/%s/%s.conf.d\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"image\" ( index $envAll.Values.images.tags ( printf \"%s_db_sync\" $serviceName )) ) -}}\n{{- $etcSources := index (index $envAll.Values.pod \"etcSources\" | default dict) $jobNameRef | default list -}}\n{{- $secretBin := index . \"secretBin\" -}}\n{{- $backoffLimit := index . \"backoffLimit\" | default \"1000\" -}}\n{{- $activeDeadlineSeconds := index . \"activeDeadlineSeconds\" -}}\n{{- $serviceNamePretty := $serviceName | replace \"_\" \"-\" -}}\n{{- $dbAdminTlsSecret := index . \"dbAdminTlsSecret\" | default \"\" -}}\n\n{{- $serviceAccountName := printf \"%s-%s\" $serviceNamePretty \"db-sync\" }}\n{{ tuple $envAll \"db_sync\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceNamePretty \"db-sync\" | quote }}\n  labels:\n{{ tuple $envAll $serviceName \"db-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 4 }}\n{{- end }}\n  annotations:\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 -}}\n{{- if $jobAnnotations }}\n{{ toYaml $jobAnnotations | indent 4 }}\n{{- end }}\nspec:\n  backoffLimit: {{ $backoffLimit }}\n{{- if $activeDeadlineSeconds }}\n  activeDeadlineSeconds: {{ $activeDeadlineSeconds }}\n{{- end }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll $serviceName \"db-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 8 
}}\n{{- end }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n{{ tuple \"db_sync\" $envAll | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"db_sync\" $envAll | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      {{ tuple $envAll \"db_sync\" | include \"helm-toolkit.snippets.kubernetes_image_pull_secrets\" | indent 6 }}\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n{{- if $tolerationsEnabled }}\n{{ tuple $envAll $serviceName | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end}}\n      initContainers:\n{{ tuple $envAll \"db_sync\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: {{ printf \"%s-%s\" $serviceNamePretty \"db-sync\" | quote }}\n          image: {{ $dbToSync.image | quote }}\n          imagePullPolicy: {{ $envAll.Values.images.pull_policy | quote }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_sync | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{- if $podEnvVars }}\n          env:\n{{ $podEnvVars | toYaml | indent 12 }}\n{{- end }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/db-sync.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: db-sync-sh\n              mountPath: /tmp/db-sync.sh\n              subPath: db-sync.sh\n              readOnly: true\n            - name: etc-service\n              mountPath: {{ dir $dbToSync.configFile | quote }}\n            - name: db-sync-conf\n              mountPath: {{ $dbToSync.configFile | quote }}\n              subPath: {{ base $dbToSync.configFile | quote }}\n              readOnly: true\n            - name: db-sync-conf-dir\n             
 mountPath: {{ $dbToSync.configDir | quote }}\n              readOnly: true\n            - name: db-sync-conf\n              mountPath: {{ $dbToSync.logConfigFile | quote }}\n              subPath: {{ base $dbToSync.logConfigFile | quote }}\n              readOnly: true\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $dbAdminTlsSecret \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- if $podVolMounts }}\n{{ $podVolMounts | toYaml | indent 12 }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: db-sync-sh\n{{- if $secretBin }}\n          secret:\n            secretName: {{ $secretBin | quote }}\n            defaultMode: 0555\n{{- else }}\n          configMap:\n            name: {{ $configMapBin | quote }}\n            defaultMode: 0555\n{{- end }}\n        - name: etc-service\n          emptyDir: {}\n        - name: db-sync-conf\n          secret:\n            secretName: {{ $configMapEtc | quote }}\n            defaultMode: 0444\n        - name: db-sync-conf-dir\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $dbAdminTlsSecret | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- if $podVols }}\n{{ $podVols | toYaml | indent 8 }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_job-ks-endpoints.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# This function creates a manifest for keystone service management.\n# It can be used in charts dict created similar to the following:\n# {- $ksEndpointJob := dict \"envAll\" . \"serviceName\" \"senlin\" \"serviceTypes\" ( tuple \"clustering\" ) -}\n# { $ksEndpointJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }\n\n{{- define \"helm-toolkit.manifests.job_ks_endpoints\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $serviceName := index . \"serviceName\" -}}\n{{- $serviceTypes := index . \"serviceTypes\" -}}\n{{- $jobAnnotations := index . \"jobAnnotations\" -}}\n{{- $jobLabels := index . \"jobLabels\" -}}\n{{- $nodeSelector := index . \"nodeSelector\" | default ( dict $envAll.Values.labels.job.node_selector_key $envAll.Values.labels.job.node_selector_value ) -}}\n{{- $tolerationsEnabled := index . \"tolerationsEnabled\" | default false -}}\n{{- $configMapBin := index . \"configMapBin\" | default (printf \"%s-%s\" $serviceName \"bin\" ) -}}\n{{- $secretBin := index . \"secretBin\" -}}\n{{- $tlsSecret := index . \"tlsSecret\" | default \"\" -}}\n{{- $backoffLimit := index . \"backoffLimit\" | default \"1000\" -}}\n{{- $activeDeadlineSeconds := index . 
\"activeDeadlineSeconds\" -}}\n{{- $serviceNamePretty := $serviceName | replace \"_\" \"-\" -}}\n{{- $restartPolicy_ := \"OnFailure\" -}}\n{{- if hasKey $envAll.Values \"jobs\" -}}\n{{- if hasKey $envAll.Values.jobs \"ks_endpoints\" -}}\n{{- $restartPolicy_ = $envAll.Values.jobs.ks_endpoints.restartPolicy | default $restartPolicy_ }}\n{{- end }}\n{{- end }}\n{{- $restartPolicy := index . \"restartPolicy\" | default $restartPolicy_ -}}\n\n{{- $serviceAccountName := printf \"%s-%s\" $serviceNamePretty \"ks-endpoints\" }}\n{{ tuple $envAll \"ks_endpoints\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceNamePretty \"ks-endpoints\" | quote }}\n  labels:\n{{ tuple $envAll $serviceName \"ks-endpoints\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 4 }}\n{{- end }}\n  annotations:\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 -}}\n{{- if $jobAnnotations }}\n{{ toYaml $jobAnnotations | indent 4 }}\n{{- end }}\nspec:\n  backoffLimit: {{ $backoffLimit }}\n{{- if $activeDeadlineSeconds }}\n  activeDeadlineSeconds: {{ $activeDeadlineSeconds }}\n{{- end }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll $serviceName \"ks-endpoints\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 8 }}\n{{- end }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: {{ $restartPolicy }}\n      {{ tuple $envAll \"ks_endpoints\" | include \"helm-toolkit.snippets.kubernetes_image_pull_secrets\" | indent 6 }}\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n{{- if $tolerationsEnabled }}\n{{ 
tuple $envAll $serviceName | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end}}\n      initContainers:\n{{ tuple $envAll \"ks_endpoints\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n{{- range $key1, $osServiceType := $serviceTypes }}\n{{- range $key2, $osServiceEndPoint := tuple \"admin\" \"internal\" \"public\" }}\n        - name: {{ printf \"%s-%s-%s\" $osServiceType \"ks-endpoints\" $osServiceEndPoint | quote }}\n          image: {{ $envAll.Values.images.tags.ks_endpoints }}\n          imagePullPolicy: {{ $envAll.Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_endpoints | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/ks-endpoints.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ks-endpoints-sh\n              mountPath: /tmp/ks-endpoints.sh\n              subPath: ks-endpoints.sh\n              readOnly: true\n{{ dict \"enabled\" true \"name\" $tlsSecret \"ca\" true | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.admin \"useCA\" (ne $tlsSecret \"\") }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n            - name: OS_SVC_ENDPOINT\n              value: {{ $osServiceEndPoint | quote }}\n            - name: OS_SERVICE_NAME\n              value: {{ tuple $osServiceType $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_name_lookup\" }}\n            - name: OS_SERVICE_TYPE\n              value: {{ $osServiceType | quote }}\n            - name: OS_SERVICE_ENDPOINT\n              value: {{ tuple $osServiceType $osServiceEndPoint \"api\" $envAll | include 
\"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | quote }}\n{{- end }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: ks-endpoints-sh\n{{- if $secretBin }}\n          secret:\n            secretName: {{ $secretBin | quote }}\n            defaultMode: 0555\n{{- else }}\n          configMap:\n            name: {{ $configMapBin | quote }}\n            defaultMode: 0555\n{{- end }}\n{{- dict \"enabled\" true \"name\" $tlsSecret | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_job-ks-service.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# This function creates a manifest for keystone service management.\n# It can be used in charts dict created similar to the following:\n# {- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"senlin\" \"serviceTypes\" ( tuple \"clustering\" ) -}\n# { $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }\n\n{{- define \"helm-toolkit.manifests.job_ks_service\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $serviceName := index . \"serviceName\" -}}\n{{- $serviceTypes := index . \"serviceTypes\" -}}\n{{- $jobAnnotations := index . \"jobAnnotations\" -}}\n{{- $jobLabels := index . \"jobLabels\" -}}\n{{- $nodeSelector := index . \"nodeSelector\" | default ( dict $envAll.Values.labels.job.node_selector_key $envAll.Values.labels.job.node_selector_value ) -}}\n{{- $tolerationsEnabled := index . \"tolerationsEnabled\" | default false -}}\n{{- $configMapBin := index . \"configMapBin\" | default (printf \"%s-%s\" $serviceName \"bin\" ) -}}\n{{- $secretBin := index . \"secretBin\" -}}\n{{- $tlsSecret := index . \"tlsSecret\" | default \"\" -}}\n{{- $backoffLimit := index . \"backoffLimit\" | default \"1000\" -}}\n{{- $activeDeadlineSeconds := index . 
\"activeDeadlineSeconds\" -}}\n{{- $serviceNamePretty := $serviceName | replace \"_\" \"-\" -}}\n{{- $restartPolicy_ := \"OnFailure\" -}}\n{{- if hasKey $envAll.Values \"jobs\" -}}\n{{- if hasKey $envAll.Values.jobs \"ks_service\" -}}\n{{- $restartPolicy_ = $envAll.Values.jobs.ks_service.restartPolicy | default $restartPolicy_ }}\n{{- end }}\n{{- end }}\n{{- $restartPolicy := index . \"restartPolicy\" | default $restartPolicy_ -}}\n\n{{- $serviceAccountName := printf \"%s-%s\" $serviceNamePretty \"ks-service\" }}\n{{ tuple $envAll \"ks_service\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceNamePretty \"ks-service\" | quote }}\n  labels:\n{{ tuple $envAll $serviceName \"ks-service\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 4 }}\n{{- end }}\n  annotations:\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 -}}\n{{- if $jobAnnotations }}\n{{ toYaml $jobAnnotations | indent 4 }}\n{{- end }}\nspec:\n  backoffLimit: {{ $backoffLimit }}\n{{- if $activeDeadlineSeconds }}\n  activeDeadlineSeconds: {{ $activeDeadlineSeconds }}\n{{- end }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll $serviceName \"ks-service\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 8 }}\n{{- end }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: {{ $restartPolicy }}\n      {{ tuple $envAll \"ks_service\" | include \"helm-toolkit.snippets.kubernetes_image_pull_secrets\" | indent 6 }}\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n{{- if $tolerationsEnabled }}\n{{ tuple $envAll 
$serviceName | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end}}\n      initContainers:\n{{ tuple $envAll \"ks_service\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n{{- range $key1, $osServiceType := $serviceTypes }}\n        - name: {{ printf \"%s-%s\" $osServiceType \"ks-service-registration\" | quote }}\n          image: {{ $envAll.Values.images.tags.ks_service }}\n          imagePullPolicy: {{ $envAll.Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_service | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/ks-service.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ks-service-sh\n              mountPath: /tmp/ks-service.sh\n              subPath: ks-service.sh\n              readOnly: true\n{{ dict \"enabled\" true \"name\" $tlsSecret \"ca\" true | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.admin \"useCA\" (ne $tlsSecret \"\") }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n            - name: OS_SERVICE_NAME\n              value: {{ tuple $osServiceType $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_name_lookup\" }}\n            - name: OS_SERVICE_TYPE\n              value: {{ $osServiceType | quote }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: ks-service-sh\n{{- if $secretBin }}\n          secret:\n            secretName: {{ $secretBin | quote }}\n            defaultMode: 0555\n{{- else }}\n          configMap:\n            name: {{ $configMapBin | quote }}\n            defaultMode: 0555\n{{- end }}\n{{- dict \"enabled\" true \"name\" 
$tlsSecret | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_job-ks-user.yaml.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# This function creates a manifest for keystone user management.\n# It can be used in charts dict created similar to the following:\n# {- $ksUserJob := dict \"envAll\" . \"serviceName\" \"senlin\" }\n# { $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }\n\n{{/*\n# This function creates a manifest for keystone user management.\n# It can be used in charts as follows:\n# {{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"heat\" \"serviceUsers\" ( tuple \"heat\" \"heat_trustee\" ) -}}\n# {{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n*/}}\n\n{{- define \"helm-toolkit.manifests.job_ks_user\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $serviceName := index . \"serviceName\" -}}\n{{- $serviceNamePretty := $serviceName | replace \"_\" \"-\" -}}\n{{- $jobAnnotations := index . \"jobAnnotations\" -}}\n{{- $jobLabels := index . \"jobLabels\" -}}\n{{- $nodeSelector := index . \"nodeSelector\" | default ( dict $envAll.Values.labels.job.node_selector_key $envAll.Values.labels.job.node_selector_value ) -}}\n{{- $tolerationsEnabled := index . \"tolerationsEnabled\" | default false -}}\n{{- $configMapBin := index . \"configMapBin\" | default (printf \"%s-%s\" $serviceName \"bin\" ) -}}\n{{- $singleServiceUser := index . \"serviceUser\" | default $serviceName -}}\n{{- $serviceUsers := index . \"serviceUsers\" | default (tuple $singleServiceUser) -}}\n{{- $secretBin := index . 
\"secretBin\" -}}\n{{- $tlsSecret := index . \"tlsSecret\" | default \"\" -}}\n{{- $backoffLimit := index . \"backoffLimit\" | default \"1000\" -}}\n{{- $activeDeadlineSeconds := index . \"activeDeadlineSeconds\" -}}\n{{- $restartPolicy_ := \"OnFailure\" -}}\n{{- if hasKey $envAll.Values \"jobs\" -}}\n{{- if hasKey $envAll.Values.jobs \"ks_user\" -}}\n{{- $restartPolicy_ = $envAll.Values.jobs.ks_user.restartPolicy | default $restartPolicy_ }}\n{{- end }}\n{{- end }}\n{{- $restartPolicy := index . \"restartPolicy\" | default $restartPolicy_ -}}\n\n{{- $serviceAccountName := printf \"%s-ks-user\" $serviceNamePretty }}\n{{ tuple $envAll \"ks_user\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ printf \"%s-ks-user\" $serviceNamePretty | quote }}\n  labels:\n{{ tuple $envAll $serviceName \"ks-user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 4 }}\n{{- end }}\n  annotations:\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 -}}\n{{- if $jobAnnotations }}\n{{ toYaml $jobAnnotations | indent 4 }}\n{{- end }}\nspec:\n  backoffLimit: {{ $backoffLimit }}\n{{- if $activeDeadlineSeconds }}\n  activeDeadlineSeconds: {{ $activeDeadlineSeconds }}\n{{- end }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll $serviceName \"ks-user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 8 }}\n{{- end }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName | quote }}\n{{ dict \"envAll\" $envAll \"application\" \"ks_user\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      restartPolicy: {{ $restartPolicy 
}}\n      {{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.kubernetes_image_pull_secrets\" | indent 6 }}\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n{{- if $tolerationsEnabled }}\n{{ tuple $envAll $serviceName | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end}}\n      initContainers:\n{{ tuple $envAll \"ks_user\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n{{- range $serviceUser := $serviceUsers }}\n        - name: {{ printf \"%s-ks-user\" $serviceUser | replace \"_\" \"-\" | quote }}\n          image: {{ $envAll.Values.images.tags.ks_user }}\n          imagePullPolicy: {{ $envAll.Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"ks_user\" \"container\" \"ks_user\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/ks-user.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ks-user-sh\n              mountPath: /tmp/ks-user.sh\n              subPath: ks-user.sh\n              readOnly: true\n{{ dict \"enabled\" true \"name\" $tlsSecret \"ca\" true | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.admin \"useCA\" (ne $tlsSecret \"\") }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n            - name: SERVICE_OS_SERVICE_NAME\n              value: {{ $serviceName | quote }}\n{{- with $env := dict \"ksUserSecret\" (index $envAll.Values.secrets.identity $serviceUser ) }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 12 
}}\n{{- end }}\n            - name: SERVICE_OS_ROLES\n            {{- $serviceOsRoles := index $envAll.Values.endpoints.identity.auth $serviceUser \"role\" }}\n            {{- if kindIs \"slice\" $serviceOsRoles }}\n              value: {{ include \"helm-toolkit.utils.joinListWithComma\" $serviceOsRoles | quote }}\n            {{- else }}\n              value: {{ $serviceOsRoles | quote }}\n            {{- end }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: ks-user-sh\n{{- if $secretBin }}\n          secret:\n            secretName: {{ $secretBin | quote }}\n            defaultMode: 0555\n{{- else }}\n          configMap:\n            name: {{ $configMapBin | quote }}\n            defaultMode: 0555\n{{- end }}\n{{- dict \"enabled\" true \"name\" $tlsSecret | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_job-rabbit-init.yaml.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.manifests.job_rabbit_init\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $serviceName := index . \"serviceName\" -}}\n{{- $jobAnnotations := index . \"jobAnnotations\" -}}\n{{- $jobLabels := index . \"jobLabels\" -}}\n{{- $nodeSelector := index . \"nodeSelector\" | default ( dict $envAll.Values.labels.job.node_selector_key $envAll.Values.labels.job.node_selector_value ) -}}\n{{- $tolerationsEnabled := index . \"tolerationsEnabled\" | default false -}}\n{{- $configMapBin := index . \"configMapBin\" | default (printf \"%s-%s\" $serviceName \"bin\" ) -}}\n{{- $serviceUser := index . \"serviceUser\" | default $serviceName -}}\n{{- $secretBin := index . \"secretBin\" -}}\n{{- $backoffLimit := index . \"backoffLimit\" | default \"1000\" -}}\n{{- $activeDeadlineSeconds := index . \"activeDeadlineSeconds\" -}}\n{{- $serviceUserPretty := $serviceUser | replace \"_\" \"-\" -}}\n{{- $serviceNamePretty := $serviceName | replace \"_\" \"-\" -}}\n{{- $tlsPath := index . \"tlsPath\" | default \"/etc/rabbitmq/certs\" -}}\n{{- $tlsSecret := index . 
\"tlsSecret\" | default \"\" -}}\n\n{{- $serviceAccountName := printf \"%s-%s\" $serviceUserPretty \"rabbit-init\" }}\n{{ tuple $envAll \"rabbit_init\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceUserPretty \"rabbit-init\" | quote }}\n  labels:\n{{ tuple $envAll $serviceName \"rabbit-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 4 }}\n{{- end }}\n  annotations:\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 -}}\n{{- if $jobAnnotations }}\n{{ toYaml $jobAnnotations | indent 4 }}\n{{- end }}\nspec:\n  backoffLimit: {{ $backoffLimit }}\n{{- if $activeDeadlineSeconds }}\n  activeDeadlineSeconds: {{ $activeDeadlineSeconds }}\n{{- end }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll $serviceName \"rabbit-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 8 }}\n{{- end }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName | quote }}\n      restartPolicy: OnFailure\n      {{ tuple $envAll \"rabbit_init\" | include \"helm-toolkit.snippets.kubernetes_image_pull_secrets\" | indent 6 }}\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n{{- if $tolerationsEnabled }}\n{{ tuple $envAll $serviceName | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end}}\n      initContainers:\n{{ tuple $envAll \"rabbit_init\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: rabbit-init\n          image: {{ $envAll.Values.images.tags.rabbit_init | quote }}\n          imagePullPolicy: {{ 
$envAll.Values.images.pull_policy | quote }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.rabbit_init | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/rabbit-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: rabbit-init-sh\n              mountPath: /tmp/rabbit-init.sh\n              subPath: rabbit-init.sh\n              readOnly: true\n{{- if $envAll.Values.manifests.certificates }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $tlsSecret \"path\" $tlsPath | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- end }}\n          env:\n          - name: RABBITMQ_ADMIN_CONNECTION\n            valueFrom:\n              secretKeyRef:\n                name: {{ $envAll.Values.secrets.oslo_messaging.admin }}\n                key: RABBITMQ_CONNECTION\n          - name: RABBITMQ_USER_CONNECTION\n            valueFrom:\n              secretKeyRef:\n                name: {{ index $envAll.Values.secrets.oslo_messaging $serviceName }}\n                key: RABBITMQ_CONNECTION\n{{- if $envAll.Values.conf.rabbitmq }}\n          - name: RABBITMQ_AUXILIARY_CONFIGURATION\n            value: {{ toJson $envAll.Values.conf.rabbitmq | quote }}\n{{- end }}\n{{- if and $envAll.Values.manifests.certificates (ne $tlsSecret \"\") }}\n          - name: RABBITMQ_X509\n            value: \"REQUIRE X509\"\n          - name: USER_CERT_PATH\n            value: {{ $tlsPath | quote }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: rabbit-init-sh\n{{- if $secretBin }}\n          secret:\n            secretName: {{ $secretBin | quote }}\n            defaultMode: 0555\n{{- else }}\n          configMap:\n            name: {{ $configMapBin | quote }}\n            defaultMode: 0555\n{{- end }}\n{{- if $envAll.Values.manifests.certificates 
}}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $tlsSecret | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_job-s3-bucket.yaml.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# This function creates a manifest for linking an s3 bucket to an s3 user.\n# It can be used in charts dict created similar to the following:\n# {- $s3BucketJob := dict \"envAll\" . \"serviceName\" \"elasticsearch\" }\n# { $s3BucketJob | include \"helm-toolkit.manifests.job_s3_bucket\" }\n\n{{- define \"helm-toolkit.manifests.job_s3_bucket\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $serviceName := index . \"serviceName\" -}}\n{{- $jobAnnotations := index . \"jobAnnotations\" -}}\n{{- $jobLabels := index . \"jobLabels\" -}}\n{{- $nodeSelector := index . \"nodeSelector\" | default ( dict $envAll.Values.labels.job.node_selector_key $envAll.Values.labels.job.node_selector_value ) -}}\n{{- $tolerationsEnabled := index . \"tolerationsEnabled\" | default false -}}\n{{- $configMapBin := index . \"configMapBin\" | default (printf \"%s-%s\" $serviceName \"bin\" ) -}}\n{{- $configMapCeph := index . \"configMapCeph\" | default (printf \"ceph-etc\" ) -}}\n{{- $secretBin := index . \"secretBin\" -}}\n{{- $backoffLimit := index . \"backoffLimit\" | default \"1000\" -}}\n{{- $activeDeadlineSeconds := index . \"activeDeadlineSeconds\" -}}\n{{- $serviceNamePretty := $serviceName | replace \"_\" \"-\" -}}\n{{- $s3UserSecret := index $envAll.Values.secrets.rgw $serviceName -}}\n{{- $s3Bucket := index . \"s3Bucket\" | default $serviceName }}\n{{- $tlsCertificateSecret := index . 
\"tlsCertificateSecret\" -}}\n{{- $tlsCertificatePath := index . \"tlsCertificatePath\" -}}\n\n{{- $serviceAccountName := printf \"%s-%s\" $serviceNamePretty \"s3-bucket\" }}\n{{ tuple $envAll \"s3_bucket\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceNamePretty \"s3-bucket\" | quote }}\n  labels:\n{{ tuple $envAll $serviceName \"s3-bucket\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 4 }}\n{{- end }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 -}}\n{{- if $jobAnnotations }}\n{{ toYaml $jobAnnotations | indent 4 }}\n{{- end }}\nspec:\n  backoffLimit: {{ $backoffLimit }}\n{{- if $activeDeadlineSeconds }}\n  activeDeadlineSeconds: {{ $activeDeadlineSeconds }}\n{{- end }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll $serviceName \"s3-bucket\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 8 }}\n{{- end }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName | quote }}\n      restartPolicy: OnFailure\n      {{ tuple $envAll \"s3_bucket\" | include \"helm-toolkit.snippets.kubernetes_image_pull_secrets\" | indent 6 }}\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n{{- if $tolerationsEnabled }}\n{{ tuple $envAll $serviceName | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end}}\n      initContainers:\n{{ tuple $envAll \"s3_bucket\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: s3-bucket\n          image: {{ $envAll.Values.images.tags.s3_bucket }}\n          imagePullPolicy: 
{{ $envAll.Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.s3_bucket | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/create-s3-bucket.sh\n          env:\n{{- include \"helm-toolkit.snippets.rgw_s3_user_env_vars\" $envAll | indent 12 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: s3-bucket-sh\n              mountPath: /tmp/create-s3-bucket.sh\n              subPath: create-s3-bucket.sh\n              readOnly: true\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            {{- if empty $envAll.Values.conf.ceph.admin_keyring }}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{ end }}\n{{- if and ($tlsCertificatePath) ($tlsCertificateSecret) }}\n            - name: {{ $tlsCertificateSecret }}\n              mountPath: {{ $tlsCertificatePath }}\n              subPath: ca.crt\n              readOnly: true\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: s3-bucket-sh\n{{- if $secretBin }}\n          secret:\n            secretName: {{ $secretBin | quote }}\n            defaultMode: 0555\n{{- else }}\n          configMap:\n            name: {{ $configMapBin | quote }}\n            defaultMode: 0555\n{{- end }}\n        - name: etcceph\n          emptyDir: {}\n        - name: ceph-etc\n          configMap:\n            name: {{ $configMapCeph | quote }}\n            defaultMode: 0444\n        {{- if empty $envAll.Values.conf.ceph.admin_keyring }}\n        - name: ceph-keyring\n          secret:\n            secretName: pvc-ceph-client-key\n        {{ end }}\n{{- if and 
($tlsCertificatePath) ($tlsCertificateSecret) }}\n        - name: {{ $tlsCertificateSecret }}\n          secret:\n            secretName: {{ $tlsCertificateSecret }}\n            defaultMode: 292\n{{- end }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_job-s3-user.yaml.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# This function creates a manifest for s3 user management.\n# It can be used in charts dict created similar to the following:\n# {- $s3UserJob := dict \"envAll\" . \"serviceName\" \"elasticsearch\" }\n# { $s3UserJob | include \"helm-toolkit.manifests.job_s3_user\" }\n\n{{- define \"helm-toolkit.manifests.job_s3_user\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $serviceName := index . \"serviceName\" -}}\n{{- $jobAnnotations := index . \"jobAnnotations\" -}}\n{{- $jobLabels := index . \"jobLabels\" -}}\n{{- $nodeSelector := index . \"nodeSelector\" | default ( dict $envAll.Values.labels.job.node_selector_key $envAll.Values.labels.job.node_selector_value ) -}}\n{{- $tolerationsEnabled := index . \"tolerationsEnabled\" | default false -}}\n{{- $configMapBin := index . \"configMapBin\" | default (printf \"%s-%s\" $serviceName \"bin\" ) -}}\n{{- $configMapCeph := index . \"configMapCeph\" | default (printf \"ceph-etc\" ) -}}\n{{- $secretBin := index . \"secretBin\" -}}\n{{- $backoffLimit := index . \"backoffLimit\" | default \"1000\" -}}\n{{- $activeDeadlineSeconds := index . 
\"activeDeadlineSeconds\" -}}\n{{- $serviceNamePretty := $serviceName | replace \"_\" \"-\" -}}\n{{- $s3UserSecret := index $envAll.Values.secrets.rgw $serviceName -}}\n\n{{- $serviceAccountName := printf \"%s-%s\" $serviceNamePretty \"s3-user\" }}\n{{ tuple $envAll \"s3_user\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceNamePretty \"s3-user\" | quote }}\n  labels:\n{{ tuple $envAll $serviceName \"s3-user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 4 }}\n{{- end }}\n  annotations:\n    \"helm.sh/hook-delete-policy\": before-hook-creation\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 -}}\n{{- if $jobAnnotations }}\n{{ toYaml $jobAnnotations | indent 4 }}\n{{- end }}\nspec:\n  backoffLimit: {{ $backoffLimit }}\n{{- if $activeDeadlineSeconds }}\n  activeDeadlineSeconds: {{ $activeDeadlineSeconds }}\n{{- end }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll $serviceName \"s3-user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 8 }}\n{{- end }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName | quote }}\n      restartPolicy: OnFailure\n      {{ tuple $envAll \"s3_user\" | include \"helm-toolkit.snippets.kubernetes_image_pull_secrets\" | indent 6 }}\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n{{- if $tolerationsEnabled }}\n{{ tuple $envAll $serviceName | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end}}\n      initContainers:\n{{ tuple $envAll \"s3_user\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: 
ceph-keyring-placement\n          image: {{ $envAll.Values.images.tags.ceph_key_placement }}\n          imagePullPolicy: {{ $envAll.Values.images.pull_policy }}\n          command:\n            - /tmp/ceph-admin-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: ceph-keyring-sh\n              mountPath: /tmp/ceph-admin-keyring.sh\n              subPath: ceph-admin-keyring.sh\n              readOnly: true\n            {{- if empty $envAll.Values.conf.ceph.admin_keyring }}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{ end }}\n      containers:\n        - name: s3-user\n          image: {{ $envAll.Values.images.tags.s3_user }}\n          imagePullPolicy: {{ $envAll.Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.s3_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/create-s3-user.sh\n          env:\n{{- include \"helm-toolkit.snippets.rgw_s3_user_env_vars\" $envAll | indent 12 }}\n            - name: RGW_HOST\n              value: {{ tuple \"ceph_object_store\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: create-s3-user-sh\n              mountPath: /tmp/create-s3-user.sh\n              subPath: create-s3-user.sh\n              readOnly: true\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            {{- if empty $envAll.Values.conf.ceph.admin_keyring }}\n            - name: 
ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: create-s3-user-sh\n{{- if $secretBin }}\n          secret:\n            secretName: {{ $secretBin | quote }}\n            defaultMode: 0555\n{{- else }}\n          configMap:\n            name: {{ $configMapBin | quote }}\n            defaultMode: 0555\n{{- end }}\n        - name: ceph-keyring-sh\n          configMap:\n            name: {{ $configMapBin | quote }}\n            defaultMode: 0555\n        - name: etcceph\n          emptyDir: {}\n        - name: ceph-etc\n          configMap:\n            name: {{ $configMapCeph | quote }}\n            defaultMode: 0444\n        {{- if empty $envAll.Values.conf.ceph.admin_keyring }}\n        - name: ceph-keyring\n          secret:\n            secretName: pvc-ceph-client-key\n        {{ end }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_job_image_repo_sync.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# This function creates a manifest for the image repo sync jobs.\n# It can be used in charts dict created similar to the following:\n# {- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"prometheus\" -}\n# { $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }\n\n{{- define \"helm-toolkit.manifests.job_image_repo_sync\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $serviceName := index . \"serviceName\" -}}\n{{- $jobAnnotations := index . \"jobAnnotations\" -}}\n{{- $jobLabels := index . \"jobLabels\" -}}\n{{- $nodeSelector := index . \"nodeSelector\" | default ( dict $envAll.Values.labels.job.node_selector_key $envAll.Values.labels.job.node_selector_value ) -}}\n{{- $tolerationsEnabled := index . \"tolerationsEnabled\" | default false -}}\n{{- $podVolMounts := index . \"podVolMounts\" | default false -}}\n{{- $podVols := index . \"podVols\" | default false -}}\n{{- $configMapBin := index . \"configMapBin\" | default (printf \"%s-%s\" $serviceName \"bin\" ) -}}\n{{- $secretBin := index . \"secretBin\" -}}\n{{- $backoffLimit := index . \"backoffLimit\" | default \"1000\" -}}\n{{- $activeDeadlineSeconds := index . 
\"activeDeadlineSeconds\" -}}\n{{- $serviceNamePretty := $serviceName | replace \"_\" \"-\" -}}\n\n{{- $serviceAccountName := printf \"%s-%s\" $serviceNamePretty \"image-repo-sync\" }}\n{{ tuple $envAll \"image_repo_sync\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ printf \"%s-%s\" $serviceNamePretty \"image-repo-sync\" | quote }}\n  labels:\n{{ tuple $envAll $serviceName \"image-repo-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 4 }}\n{{- end }}\n  annotations:\n    \"helm.sh/hook-delete-policy\": before-hook-creation\n{{- if $jobAnnotations }}\n{{ toYaml $jobAnnotations | indent 4 }}\n{{- end }}\nspec:\n  backoffLimit: {{ $backoffLimit }}\n{{- if $activeDeadlineSeconds }}\n  activeDeadlineSeconds: {{ $activeDeadlineSeconds }}\n{{- end }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll $serviceName \"image-repo-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n{{- if $jobLabels }}\n{{ toYaml $jobLabels | indent 8 }}\n{{- end }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      {{ tuple $envAll \"image_repo_sync\" | include \"helm-toolkit.snippets.kubernetes_image_pull_secrets\" | indent 6 }}\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n{{- if $tolerationsEnabled }}\n{{ tuple $envAll $serviceName | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end}}\n      initContainers:\n{{ tuple $envAll \"image_repo_sync\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: image-repo-sync\n{{ tuple $envAll \"image_repo_sync\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.image_repo_sync | include 
\"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: LOCAL_REPO\n              value: \"{{ tuple \"local_image_registry\" \"node\" $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}:{{ tuple \"local_image_registry\" \"node\" \"registry\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\"\n            - name: IMAGE_SYNC_LIST\n              value: \"{{ include \"helm-toolkit.utils.image_sync_list\" $envAll }}\"\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/image-repo-sync.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: bootstrap-sh\n              mountPath: /tmp/image-repo-sync.sh\n              subPath: image-repo-sync.sh\n              readOnly: true\n            - name: docker-socket\n              mountPath: /var/run/docker.sock\n{{- if $podVolMounts }}\n{{ $podVolMounts | toYaml | indent 12 }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: bootstrap-sh\n{{- if $secretBin }}\n          secret:\n            secretName: {{ $secretBin | quote }}\n            defaultMode: 0555\n{{- else }}\n          configMap:\n            name: {{ $configMapBin | quote }}\n            defaultMode: 0555\n{{- end }}\n        - name: docker-socket\n          hostPath:\n            path: /var/run/docker.sock\n{{- if $podVols }}\n{{ $podVols | toYaml | indent 8 }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_network_policy.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Creates a network policy manifest for services.\nvalues: |\n  endpoints:\n    kube_dns:\n      namespace: kube-system\n      name: kubernetes-dns\n      hosts:\n        default: kube-dns\n      host_fqdn_override:\n        default: null\n      path:\n        default: null\n      scheme: http\n      port:\n        dns_tcp:\n          default: 53\n        dns:\n          default: 53\n          protocol: UDP\n  network_policy:\n    myLabel:\n      podSelector:\n        matchLabels:\n          component: api\n      ingress:\n      - from:\n        - podSelector:\n            matchLabels:\n              application: keystone\n        ports:\n        - protocol: TCP\n          port: 80\n      egress:\n      - to:\n        - namespaceSelector:\n            matchLabels:\n              name: default\n        - namespaceSelector:\n            matchLabels:\n              name: kube-public\n        ports:\n        - protocol: TCP\n          port: 53\n        - protocol: UDP\n          port: 53\nusage: |\n  {{ dict \"envAll\" . \"name\" \"application\" \"label\" \"myLabel\" | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n  {{ dict \"envAll\" . 
\"key\" \"myLabel\" \"labels\" (dict \"application\" \"myApp\" \"component\" \"myComp\")}}\nreturn: |\n  ---\n  apiVersion: networking.k8s.io/v1\n  kind: NetworkPolicy\n  metadata:\n    name: RELEASE-NAME\n    namespace: NAMESPACE\n  spec:\n    policyTypes:\n      - Ingress\n      - Egress\n    podSelector:\n      matchLabels:\n        application: myLabel\n        component: api\n    ingress:\n    - from:\n      - podSelector:\n          matchLabels:\n            application: keystone\n      ports:\n      - protocol: TCP\n        port: 80\n    egress:\n      - to:\n        - podSelector:\n            matchLabels:\n              name: default\n        - namespaceSelector:\n            matchLabels:\n              name: kube-public\n        ports:\n        - protocol: TCP\n          port: 53\n        - protocol: UDP\n          port: 53\n  ---\n  apiVersion: networking.k8s.io/v1\n  kind: NetworkPolicy\n  metadata:\n    name: RELEASE-NAME\n    namespace: NAMESPACE\n  spec:\n    policyTypes:\n      - Ingress\n      - Egress\n    podSelector:\n      matchLabels:\n        application: myApp\n        component: myComp\n    ingress:\n    - from:\n      - podSelector:\n          matchLabels:\n            application: keystone\n      ports:\n      - protocol: TCP\n        port: 80\n    egress:\n      - to:\n        - podSelector:\n            matchLabels:\n              name: default\n        - namespaceSelector:\n            matchLabels:\n              name: kube-public\n        ports:\n        - protocol: TCP\n          port: 53\n        - protocol: UDP\n          port: 53\n*/}}\n\n{{/*\nabstract: |\n  Creates a network policy manifest for services.\nvalues: |\n  network_policy:\n    myLabel:\n      spec:\n        <RAW SPEC>\nusage: |\n  {{ dict \"envAll\" . 
\"name\" \"application\" \"label\" \"myLabel\" | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n\nreturn: |\n  ---\n  apiVersion: networking.k8s.io/v1\n  kind: NetworkPolicy\n  metadata:\n    name: RELEASE-NAME-myLabel-netpol\n    namespace: NAMESPACE\n  spec:\n    <RAW SPEC>\n*/}}\n\n{{- define \"helm-toolkit.manifests.kubernetes_network_policy\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $name := index . \"name\" -}}\n{{- $labels := index . \"labels\" | default nil -}}\n{{- $label := index . \"key\" | default (index . \"label\") -}}\n\n{{- $spec_labels := list  -}}\n{{- range $label, $value := $envAll.Values.network_policy }}\n{{- if hasKey $value \"spec\" }}\n{{- $spec_labels = append $spec_labels $label }}\n{{- end }}\n{{- end }}\n{{- if $spec_labels }}\n{{- range $label := $spec_labels }}\n{{- $raw_spec := (index $envAll.Values.network_policy $label \"spec\") }}\n---\napiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  name: {{ $envAll.Release.Name }}-{{ $label | replace \"_\" \"-\" }}-netpol\n  namespace: {{ $envAll.Release.Namespace }}\nspec:\n{{ $raw_spec | toYaml | indent 2 }}\n{{- end }}\n{{- else }}\n---\napiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  name: {{ $label | replace \"_\" \"-\" }}-netpol\n  namespace: {{ $envAll.Release.Namespace }}\nspec:\n{{- if hasKey (index $envAll.Values \"network_policy\") $label }}\n  policyTypes:\n{{- $is_egress := false -}}\n{{- if hasKey (index $envAll.Values.network_policy $label) \"policyTypes\" -}}\n{{- if has \"Egress\" (index $envAll.Values.network_policy $label \"policyTypes\") -}}\n{{- $is_egress = true -}}\n{{- end -}}\n{{- end -}}\n{{- if or $is_egress (index $envAll.Values.network_policy $label \"egress\") }}\n    - Egress\n{{ end -}}\n{{- $is_ingress := false -}}\n{{- if hasKey (index $envAll.Values.network_policy $label) \"policyTypes\" -}}\n{{- if has \"Ingress\" (index $envAll.Values.network_policy $label \"policyTypes\") -}}\n{{- $is_ingress = 
true -}}\n{{- end -}}\n{{- end -}}\n{{- if or $is_ingress (index $envAll.Values.network_policy $label \"ingress\") }}\n    - Ingress\n{{ end -}}\n{{- end }}\n  podSelector:\n    matchLabels:\n{{- if empty $labels }}\n      {{ $name }}: {{ $label }}\n{{- else }}\n{{ range $k, $v := $labels }}\n      {{ $k }}: {{ $v }}\n{{- end }}\n{{- end }}\n{{- if hasKey (index $envAll.Values \"network_policy\") $label }}\n{{- if hasKey (index $envAll.Values.network_policy $label) \"podSelector\" }}\n{{- if index $envAll.Values.network_policy $label \"podSelector\" \"matchLabels\" }}\n{{ index $envAll.Values.network_policy $label \"podSelector\" \"matchLabels\" | toYaml | indent 6 }}\n{{ end }}\n{{ end }}\n{{ end }}\n{{- if hasKey (index $envAll.Values \"network_policy\") $label }}\n  egress:\n{{- range $key, $value := $envAll.Values.endpoints }}\n{{- if kindIs \"map\" $value }}\n{{- if or (hasKey $value \"namespace\") (hasKey $value \"hosts\") }}\n    - to:\n{{- if index $value \"namespace\" }}\n      - namespaceSelector:\n          matchLabels:\n            name: {{ index $value \"namespace\" }}\n{{- else if index $value \"hosts\" }}\n{{- $defaultValue := index $value \"hosts\" \"internal\" }}\n{{- if hasKey (index $value \"hosts\") \"internal\" }}\n{{- $a := split \"-\" $defaultValue }}\n      - podSelector:\n          matchLabels:\n            application: {{ printf \"%s\" (index $a._0) | default $defaultValue }}\n{{- else }}\n{{- $defaultValue := index $value \"hosts\" \"default\" }}\n{{- $a := split \"-\" $defaultValue }}\n      - podSelector:\n          matchLabels:\n            application: {{ printf \"%s\" (index $a._0) | default $defaultValue }}\n{{- end }}\n{{- end }}\n{{- if index $value \"port\" }}\n      ports:\n{{- range $k, $v := index $value \"port\" }}\n{{- if $k }}\n{{- range $pk, $pv := $v }}\n{{- if and $pv (ne $pk \"protocol\") }}\n      - port: {{ $pv }}\n        protocol: {{ $v.protocol | default \"TCP\" }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end 
}}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- if index $envAll.Values.network_policy $label \"egress\" }}\n{{ index $envAll.Values.network_policy $label \"egress\" | toYaml | indent 4 }}\n{{- end }}\n{{- end }}\n{{- if hasKey (index $envAll.Values \"network_policy\") $label }}\n{{- if index $envAll.Values.network_policy $label \"ingress\" }}\n  ingress:\n{{ index $envAll.Values.network_policy $label \"ingress\" | toYaml | indent 4 }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_secret-ks-etc.yaml.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.manifests.secret_ks_etc\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $serviceName := index . \"serviceName\" -}}\n{{- $serviceUserSections := index . \"serviceUserSections\" -}}\n{{- $serviceNamePretty := $serviceName | replace \"_\" \"-\" -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ printf \"%s-ks-etc\" $serviceNamePretty | quote }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ tuple \"ks_etc\" $serviceName $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- range $epName, $sectionName := $serviceUserSections }}\n{{- $epAuth := index $envAll.Values.endpoints.identity.auth $epName -}}\n{{- $configSection := dict\n  \"region_name\" $epAuth.region_name\n  \"project_name\" $epAuth.project_name\n  \"project_domain_name\" $epAuth.project_domain_name\n  \"user_domain_name\" $epAuth.user_domain_name\n  \"username\" $epAuth.username\n  \"password\" $epAuth.password\n-}}\n{{- $configSnippet := dict $sectionName $configSection }}\n{{ printf \"%s_%s.conf\" $serviceName $sectionName | indent 2 }}: {{ include \"helm-toolkit.utils.to_oslo_conf\" $configSnippet | b64enc }}\n{{- end }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_secret-registry.yaml.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Creates a manifest for authenticating a registry with a secret\nexamples:\n  - values: |\n      annotations:\n        secret:\n          oci_image_registry:\n            {{ $serviceName }}:\n              custom.tld/key: \"value\"\n      secrets:\n        oci_image_registry:\n          {{ $serviceName }}: {{ $keyName }}\n      endpoints:\n        oci_image_registry:\n          name: oci-image-registry\n          auth:\n            enabled: true\n             {{ $serviceName }}:\n                name: {{ $userName }}\n                password: {{ $password }}\n  usage: |\n    {{- include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) -}}\n  return: |\n    ---\n    apiVersion: v1\n    kind: Secret\n    metadata:\n      name: {{ $secretName }}\n      annotations:\n        custom.tld/key: \"value\"\n    type: kubernetes.io/dockerconfigjson\n    data:\n      .dockerconfigjson: {{ $dockerAuth }}\n*/}}\n\n{{- define \"helm-toolkit.manifests.secret_registry\" }}\n{{- $envAll := index . \"envAll\" }}\n{{- $registryUser := index . 
\"registryUser\" }}\n{{- $secretName := index $envAll.Values.secrets.oci_image_registry $registryUser }}\n{{- $registryHost := tuple \"oci_image_registry\" \"internal\" $envAll | include \"helm-toolkit.endpoints.endpoint_host_lookup\" }}\n{{/*\nWe only use \"host:port\" when port is non-null, else just use \"host\"\n*/}}\n{{- $registryPort := \"\" }}\n{{- $port := $envAll.Values.endpoints.oci_image_registry.port.registry.default }}\n{{- if $port }}\n{{- $port = tuple \"oci_image_registry\" \"internal\" \"registry\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $registryPort = printf \":%s\" $port }}\n{{- end }}\n{{- $imageCredentials := index $envAll.Values.endpoints.oci_image_registry.auth $registryUser }}\n{{- $dockerAuthToken := printf \"%s:%s\" $imageCredentials.username $imageCredentials.password | b64enc }}\n{{- $dockerAuth := printf \"{\\\"auths\\\": {\\\"%s%s\\\": {\\\"auth\\\": \\\"%s\\\"}}}\" $registryHost $registryPort $dockerAuthToken | b64enc }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oci_image_registry\" $registryUser $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: kubernetes.io/dockerconfigjson\ndata:\n  .dockerconfigjson: {{ $dockerAuth }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_secret-tls.yaml.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Creates a manifest for a service's public TLS secret\nexamples:\n  - values: |\n      annotations:\n        secret:\n          tls:\n            key_manager_api_public:\n              custom.tld/key: \"value\"\n      secrets:\n        tls:\n          key_manager:\n            api:\n              public: barbican-tls-public\n      endpoints:\n        key_manager:\n          host_fqdn_override:\n            public:\n              tls:\n                crt: |\n                  FOO-CRT\n                key: |\n                  FOO-KEY\n                ca: |\n                  FOO-CA_CRT\n  usage: |\n    {{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . 
\"backendServiceType\" \"key-manager\" ) -}}\n  return: |\n    ---\n    apiVersion: v1\n    kind: Secret\n    metadata:\n      name: barbican-tls-public\n      annotations:\n        custom.tld/key: \"value\"\n    type: kubernetes.io/tls\n    data:\n      tls.key: Rk9PLUtFWQo=\n      tls.crt: Rk9PLUNSVAoKRk9PLUNBX0NSVAo=\n\n  - values: |\n      secrets:\n        tls:\n          key_manager:\n            api:\n              public: barbican-tls-public\n      endpoints:\n        key_manager:\n          host_fqdn_override:\n            public:\n              tls:\n                crt: |\n                  FOO-CRT\n                  FOO-INTERMEDIATE_CRT\n                  FOO-CA_CRT\n                key: |\n                  FOO-KEY\n  usage: |\n    {{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"key-manager\" ) -}}\n  return: |\n    ---\n    apiVersion: v1\n    kind: Secret\n    metadata:\n      name: barbican-tls-public\n    type: kubernetes.io/tls\n    data:\n      tls.key: Rk9PLUtFWQo=\n      tls.crt: Rk9PLUNSVApGT08tSU5URVJNRURJQVRFX0NSVApGT08tQ0FfQ1JUCg==\n*/}}\n\n{{- define \"helm-toolkit.manifests.secret_ingress_tls\" }}\n{{- $envAll := index . \"envAll\" }}\n{{- $endpoint := index . \"endpoint\" | default \"public\" }}\n{{- $backendServiceType := index . \"backendServiceType\" }}\n{{- $backendService := index . 
\"backendService\" | default \"api\" }}\n{{- $host := index $envAll.Values.endpoints ( $backendServiceType | replace \"-\" \"_\" ) \"host_fqdn_override\" }}\n{{- if hasKey $host $endpoint }}\n{{- $endpointHost := index $host $endpoint }}\n{{- if kindIs \"map\" $endpointHost }}\n{{- if hasKey $endpointHost \"tls\" }}\n{{- if and $endpointHost.tls.key $endpointHost.tls.crt }}\n\n{{- $customAnnotationKey := printf \"%s_%s_%s\" ( $backendServiceType | replace \"-\" \"_\" ) $backendService $endpoint }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ index $envAll.Values.secrets.tls ( $backendServiceType | replace \"-\" \"_\" ) $backendService $endpoint }}\n  annotations:\n{{ tuple \"tls\" $customAnnotationKey $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: kubernetes.io/tls\ndata:\n  tls.key: {{ $endpointHost.tls.key | b64enc }}\n{{- if $endpointHost.tls.ca }}\n  tls.crt: {{ list $endpointHost.tls.crt $endpointHost.tls.ca | join \"\\n\" | b64enc }}\n{{- else }}\n  tls.crt: {{ $endpointHost.tls.crt | b64enc }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/manifests/_service-ingress.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# This function creates a manifest for a service's ingress rules.\n# It can be used in charts dict created similar to the following:\n# {- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"key-manager\" -}\n# { $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }\n\n{{- define \"helm-toolkit.manifests.service_ingress\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $backendServiceType := index . \"backendServiceType\" -}}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple $backendServiceType \"public\" $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: http\n      port: 80\n    - name: https\n      port: 443\n  selector:\n    app: ingress-api\n{{- if index $envAll.Values.endpoints $backendServiceType }}\n{{- if index $envAll.Values.endpoints $backendServiceType \"ip\" }}\n{{- if index $envAll.Values.endpoints $backendServiceType \"ip\" \"ingress\" }}\n  clusterIP: {{ (index $envAll.Values.endpoints $backendServiceType \"ip\" \"ingress\") }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/scripts/_create-s3-user.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- define \"helm-toolkit.scripts.create_s3_user\" }}\n#!/bin/bash\nset -e\nfunction create_s3_user () {\n  echo \"Creating s3 user and key pair\"\n  radosgw-admin user create \\\n    --uid=${S3_USERNAME} \\\n    --display-name=${S3_USERNAME} \\\n    --key-type=s3 \\\n    --access-key ${S3_ACCESS_KEY} \\\n    --secret-key ${S3_SECRET_KEY}\n}\nfunction update_s3_user () {\n  # Retrieve old access keys, if they exist\n  old_access_keys=$(radosgw-admin user info --uid=${S3_USERNAME} \\\n    | jq -r '.keys[].access_key' || true)\n\n  if [[ ! 
-z ${old_access_keys} ]]; then\n    for access_key in $old_access_keys; do\n      # If current access key is the same as the key supplied, do nothing.\n      if [ \"$access_key\" == \"${S3_ACCESS_KEY}\" ]; then\n        echo \"Current user and key pair exists.\"\n        continue\n      else\n        # If keys differ, remove previous key\n        radosgw-admin key rm --uid=${S3_USERNAME} --key-type=s3 --access-key=$access_key\n      fi\n    done\n  fi\n\n  # Perform one more additional check to account for scenarios where multiple\n  # key pairs existed previously, but one existing key was the supplied key\n  current_access_key=$(radosgw-admin user info --uid=${S3_USERNAME} \\\n    | jq -r '.keys[].access_key' || true)\n\n  # If the supplied key does not exist, modify the user\n  if [[ -z ${current_access_key} ]]; then\n    # Modify user with new access and secret keys\n    echo \"Updating existing user's key pair\"\n    radosgw-admin user modify \\\n      --uid=${S3_USERNAME}\\\n      --access-key ${S3_ACCESS_KEY} \\\n      --secret-key ${S3_SECRET_KEY}\n  fi\n}\nuser_exists=$(radosgw-admin user info --uid=${S3_USERNAME} || true)\nif [[ -z ${user_exists} ]]; then\n  create_s3_user\nelse\n  update_s3_user\nfi\n{{- end }}"
  },
  {
    "path": "helm-toolkit/templates/scripts/_db-drop.py.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.scripts.db_drop\" }}\n#!/usr/bin/env python\n\n# Drops db and user for an OpenStack Service:\n# Set ROOT_DB_CONNECTION and DB_CONNECTION environment variables to contain\n# SQLAlchemy strings for the root connection to the database and the one you\n# wish the service to use. Alternatively, you can use an ini formatted config\n# at the location specified by OPENSTACK_CONFIG_FILE, and extract the string\n# from the key OPENSTACK_CONFIG_DB_KEY, in the section specified by\n# OPENSTACK_CONFIG_DB_SECTION.\n\nimport os\nimport sys\ntry:\n    import ConfigParser\n    PARSER_OPTS = {}\nexcept ImportError:\n    import configparser as ConfigParser\n    PARSER_OPTS = {\"strict\": False}\nimport logging\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import text\n\n# Create logger, console handler and formatter\nlogger = logging.getLogger('OpenStack-Helm DB Drop')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n# Set the formatter and add the handler\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\n# Get the connection string for the service db root user\nif \"ROOT_DB_CONNECTION\" in os.environ:\n    db_connection = os.environ['ROOT_DB_CONNECTION']\n    logger.info('Got DB root connection')\nelse:\n    logger.critical('environment variable 
ROOT_DB_CONNECTION not set')\n    sys.exit(1)\n\nmysql_x509 = os.getenv('MARIADB_X509', \"\")\nssl_args = {}\nif mysql_x509:\n    ssl_args = {'ssl': {'ca': '/etc/mysql/certs/ca.crt',\n                        'key': '/etc/mysql/certs/tls.key',\n                        'cert': '/etc/mysql/certs/tls.crt'}}\n\n# Get the connection string for the service db\nif \"OPENSTACK_CONFIG_FILE\" in os.environ:\n    os_conf = os.environ['OPENSTACK_CONFIG_FILE']\n    if \"OPENSTACK_CONFIG_DB_SECTION\" in os.environ:\n        os_conf_section = os.environ['OPENSTACK_CONFIG_DB_SECTION']\n    else:\n        logger.critical('environment variable OPENSTACK_CONFIG_DB_SECTION not set')\n        sys.exit(1)\n    if \"OPENSTACK_CONFIG_DB_KEY\" in os.environ:\n        os_conf_key = os.environ['OPENSTACK_CONFIG_DB_KEY']\n    else:\n        logger.critical('environment variable OPENSTACK_CONFIG_DB_KEY not set')\n        sys.exit(1)\n    try:\n        config = ConfigParser.RawConfigParser(**PARSER_OPTS)\n        logger.info(\"Using {0} as db config source\".format(os_conf))\n        config.read(os_conf)\n        logger.info(\"Trying to load db config from {0}:{1}\".format(\n            os_conf_section, os_conf_key))\n        user_db_conn = config.get(os_conf_section, os_conf_key)\n        logger.info(\"Got config from {0}\".format(os_conf))\n    except:\n        logger.critical(\"Tried to load config from {0} but failed.\".format(os_conf))\n        raise\nelif \"DB_CONNECTION\" in os.environ:\n    user_db_conn = os.environ['DB_CONNECTION']\n    logger.info('Got config from DB_CONNECTION env var')\nelse:\n    logger.critical('Could not get db config, either from config file or env var')\n    sys.exit(1)\n\n# Root DB engine\ntry:\n    root_engine_full = create_engine(db_connection)\n    root_user = root_engine_full.url.username\n    root_password = root_engine_full.url.password\n    drivername = root_engine_full.url.drivername\n    host = root_engine_full.url.host\n    port = 
root_engine_full.url.port\n    root_engine_url = ''.join([drivername, '://', root_user, ':', root_password, '@', host, ':', str (port)])\n    root_engine = create_engine(root_engine_url, connect_args=ssl_args)\n    connection = root_engine.connect()\n    connection.close()\n    logger.info(\"Tested connection to DB @ {0}:{1} as {2}\".format(\n        host, port, root_user))\nexcept:\n    logger.critical('Could not connect to database as root user')\n    raise\n\n# User DB engine\ntry:\n    user_engine = create_engine(user_db_conn, connect_args=ssl_args)\n    # Get our user data out of the user_engine\n    database = user_engine.url.database\n    user = user_engine.url.username\n    password = user_engine.url.password\n    logger.info('Got user db config')\nexcept:\n    logger.critical('Could not get user database config')\n    raise\n\n# Delete DB\ntry:\n    with root_engine.connect() as connection:\n        connection.execute(text(\"DROP DATABASE IF EXISTS {0}\".format(database)))\n        try:\n            connection.commit()\n        except AttributeError:\n            pass\n    logger.info(\"Deleted database {0}\".format(database))\nexcept:\n    logger.critical(\"Could not drop database {0}\".format(database))\n    raise\n\n# Delete DB User\ntry:\n    with root_engine.connect() as connection:\n        connection.execute(text(\"DROP USER IF EXISTS {0}\".format(user)))\n        try:\n            connection.commit()\n        except AttributeError:\n            pass\n    logger.info(\"Deleted user {0}\".format(user))\nexcept:\n    logger.critical(\"Could not delete user {0}\".format(user))\n    raise\n\nlogger.info('Finished DB Management')\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/scripts/_db-init.py.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.scripts.db_init\" }}\n#!/usr/bin/env python\n\n# Creates db and user for an OpenStack Service:\n# Set ROOT_DB_CONNECTION and DB_CONNECTION environment variables to contain\n# SQLAlchemy strings for the root connection to the database and the one you\n# wish the service to use. Alternatively, you can use an ini formatted config\n# at the location specified by OPENSTACK_CONFIG_FILE, and extract the string\n# from the key OPENSTACK_CONFIG_DB_KEY, in the section specified by\n# OPENSTACK_CONFIG_DB_SECTION.\n\nimport os\nimport sys\ntry:\n    import ConfigParser\n    PARSER_OPTS = {}\nexcept ImportError:\n    import configparser as ConfigParser\n    PARSER_OPTS = {\"strict\": False}\nimport logging\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import text\n\n# Create logger, console handler and formatter\nlogger = logging.getLogger('OpenStack-Helm DB Init')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n# Set the formatter and add the handler\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\n# Get the connection string for the service db root user\nif \"ROOT_DB_CONNECTION\" in os.environ:\n    db_connection = os.environ['ROOT_DB_CONNECTION']\n    logger.info('Got DB root connection')\nelse:\n    logger.critical('environment variable 
ROOT_DB_CONNECTION not set')\n    sys.exit(1)\n\nmysql_x509 = os.getenv('MARIADB_X509', \"\")\nssl_args = {}\nif mysql_x509:\n    ssl_args = {'ssl': {'ca': '/etc/mysql/certs/ca.crt',\n                'key': '/etc/mysql/certs/tls.key',\n                'cert': '/etc/mysql/certs/tls.crt'}}\n\n# Get the connection string for the service db\nif \"OPENSTACK_CONFIG_FILE\" in os.environ:\n    os_conf = os.environ['OPENSTACK_CONFIG_FILE']\n    if \"OPENSTACK_CONFIG_DB_SECTION\" in os.environ:\n        os_conf_section = os.environ['OPENSTACK_CONFIG_DB_SECTION']\n    else:\n        logger.critical('environment variable OPENSTACK_CONFIG_DB_SECTION not set')\n        sys.exit(1)\n    if \"OPENSTACK_CONFIG_DB_KEY\" in os.environ:\n        os_conf_key = os.environ['OPENSTACK_CONFIG_DB_KEY']\n    else:\n        logger.critical('environment variable OPENSTACK_CONFIG_DB_KEY not set')\n        sys.exit(1)\n    try:\n        config = ConfigParser.RawConfigParser(**PARSER_OPTS)\n        logger.info(\"Using {0} as db config source\".format(os_conf))\n        config.read(os_conf)\n        logger.info(\"Trying to load db config from {0}:{1}\".format(\n            os_conf_section, os_conf_key))\n        user_db_conn = config.get(os_conf_section, os_conf_key)\n        logger.info(\"Got config from {0}\".format(os_conf))\n    except:\n        logger.critical(\"Tried to load config from {0} but failed.\".format(os_conf))\n        raise\nelif \"DB_CONNECTION\" in os.environ:\n    user_db_conn = os.environ['DB_CONNECTION']\n    logger.info('Got config from DB_CONNECTION env var')\nelse:\n    logger.critical('Could not get db config, either from config file or env var')\n    sys.exit(1)\n\n# Root DB engine\ntry:\n    root_engine_full = create_engine(db_connection)\n    root_user = root_engine_full.url.username\n    root_password = root_engine_full.url.password\n    drivername = root_engine_full.url.drivername\n    host = root_engine_full.url.host\n    port = root_engine_full.url.port\n    
root_engine_url = ''.join([drivername, '://', root_user, ':', root_password, '@', host, ':', str (port)])\n    root_engine = create_engine(root_engine_url, connect_args=ssl_args)\n    connection = root_engine.connect()\n    connection.close()\n    logger.info(\"Tested connection to DB @ {0}:{1} as {2}\".format(\n        host, port, root_user))\nexcept:\n    logger.critical('Could not connect to database as root user')\n    raise\n\n# User DB engine\ntry:\n    user_engine = create_engine(user_db_conn, connect_args=ssl_args)\n    # Get our user data out of the user_engine\n    database = user_engine.url.database\n    user = user_engine.url.username\n    password = user_engine.url.password\n    logger.info('Got user db config')\nexcept:\n    logger.critical('Could not get user database config')\n    raise\n\n# Create DB\ntry:\n    with root_engine.connect() as connection:\n        connection.execute(text(\"CREATE DATABASE IF NOT EXISTS {0}\".format(database)))\n        try:\n            connection.commit()\n        except AttributeError:\n            pass\n    logger.info(\"Created database {0}\".format(database))\nexcept:\n    logger.critical(\"Could not create database {0}\".format(database))\n    raise\n\n# Create DB User\ntry:\n    with root_engine.connect() as connection:\n        connection.execute(\n            text(\"CREATE USER IF NOT EXISTS \\'{0}\\'@\\'%\\' IDENTIFIED BY \\'{1}\\' {2}\".format(\n                user, password, mysql_x509)))\n        connection.execute(\n            text(\"GRANT ALL ON `{0}`.* TO \\'{1}\\'@\\'%\\'\".format(database, user)))\n        try:\n            connection.commit()\n        except AttributeError:\n            pass\n    logger.info(\"Created user {0} for {1}\".format(user, database))\nexcept:\n    logger.critical(\"Could not create user {0} for {1}\".format(user, database))\n    raise\n\n# Test connection\ntry:\n    connection = user_engine.connect()\n    connection.close()\n    logger.info(\"Tested connection to DB @ 
{0}:{1}/{2} as {3}\".format(\n        host, port, database, user))\nexcept:\n    logger.critical('Could not connect to database as user')\n    raise\n\nlogger.info('Finished DB Management')\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/scripts/_db-pg-init.sh.tpl",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- define \"helm-toolkit.scripts.pg_db_init\" }}\n#!/bin/bash\nset -ex\n\nif [[ ! -v DB_HOST ]]; then\n    echo \"environment variable DB_HOST not set\"\n    exit 1\nelif [[ ! -v DB_ADMIN_USER ]]; then\n    echo \"environment variable DB_ADMIN_USER not set\"\n    exit 1\nelif [[ ! -v PGPASSWORD ]]; then\n    echo \"environment variable PGPASSWORD not set\"\n    exit 1\nelif [[ ! -v DB_PORT ]]; then\n    echo \"environment variable DB_PORT not set\"\n    exit 1\nelif [[ ! -v USER_DB_USER ]]; then\n    echo \"environment variable USER_DB_USER not set\"\n    exit 1\nelif [[ ! -v USER_DB_PASS ]]; then\n    echo \"environment variable USER_DB_PASS not set\"\n    exit 1\nelif [[ ! -v USER_DB_NAME ]]; then\n    echo \"environment variable USER_DB_NAME not set\"\n    exit 1\nelse\n    echo \"Got DB connection info\"\nfi\n\npgsql_superuser_cmd () {\n  DB_COMMAND=\"$1\"\n  if [[ ! 
-z $2 ]]; then\n      export PGDATABASE=$2\n  fi\n  /usr/bin/psql \\\n  -h ${DB_HOST} \\\n  -p ${DB_PORT} \\\n  -U ${DB_ADMIN_USER} \\\n  --command=\"${DB_COMMAND}\"\n}\n\n#create db\npgsql_superuser_cmd \"SELECT 1 FROM pg_database WHERE datname = '$USER_DB_NAME'\" | grep -q \"(1 row)\" || pgsql_superuser_cmd \"CREATE DATABASE $USER_DB_NAME\"\n\n#create db user\npgsql_superuser_cmd \"SELECT * FROM pg_roles WHERE rolname = '$USER_DB_USER';\" | grep -q \"(1 row)\" || \\\n    pgsql_superuser_cmd \"CREATE ROLE ${USER_DB_USER} LOGIN PASSWORD '$USER_DB_PASS';\"\n\n#Set password everytime. This is required for cases when we would want password rotation to take effect and set the updated password for a user.\npgsql_superuser_cmd \"SELECT * FROM pg_roles WHERE rolname = '$USER_DB_USER';\" && pgsql_superuser_cmd \"ALTER USER ${USER_DB_USER} with password '$USER_DB_PASS'\"\n\n#give permissions to user\npgsql_superuser_cmd \"GRANT ALL PRIVILEGES ON DATABASE $USER_DB_NAME to $USER_DB_USER;\"\n\n#revoke all privileges from PUBLIC role\npgsql_superuser_cmd \"REVOKE ALL ON DATABASE $USER_DB_NAME FROM PUBLIC;\"\n\n#Critical for PG15+: allow user to create in public schema\npgsql_superuser_cmd \"GRANT USAGE, CREATE ON SCHEMA public TO $USER_DB_USER;\" \"$USER_DB_NAME\"\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/scripts/_image-repo-sync.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.scripts.image_repo_sync\" }}\n#!/bin/sh\nset -ex\n\nIFS=','; for IMAGE in ${IMAGE_SYNC_LIST}; do\n  docker pull ${IMAGE}\n  docker tag ${IMAGE} ${LOCAL_REPO}/${IMAGE}\n  docker push ${LOCAL_REPO}/${IMAGE}\ndone\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/scripts/_ks-domain-user.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.scripts.keystone_domain_user\" }}\n#!/bin/bash\n\n# Copyright 2017 Pete Birley\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\n\n# Manage domain\nSERVICE_OS_DOMAIN_ID=$(openstack domain create --or-show --enable -f value -c id \\\n    --description=\"Service Domain for ${SERVICE_OS_DOMAIN_NAME}\" \\\n    \"${SERVICE_OS_DOMAIN_NAME}\")\n\n# Display domain\nopenstack domain show \"${SERVICE_OS_DOMAIN_ID}\"\n\n# Manage user\nSERVICE_OS_USERID=$(openstack user create --or-show --enable -f value -c id \\\n    --domain=\"${SERVICE_OS_DOMAIN_ID}\" \\\n    --description \"Service User for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_DOMAIN_NAME}\" \\\n    --password=\"${SERVICE_OS_PASSWORD}\" \\\n    \"${SERVICE_OS_USERNAME}\")\n\n# Manage user password (we do this to ensure the password is updated if required)\nopenstack user set --password=\"${SERVICE_OS_PASSWORD}\" 
\"${SERVICE_OS_USERID}\"\n\n# Display user\nopenstack user show \"${SERVICE_OS_USERID}\"\n\n# Manage role\nSERVICE_OS_ROLE_ID=$(openstack role show -f value -c id \\\n    \"${SERVICE_OS_ROLE}\" || openstack role create -f value -c id \\\n    \"${SERVICE_OS_ROLE}\" )\n\n# Manage user role assignment\nopenstack role add \\\n          --domain=\"${SERVICE_OS_DOMAIN_ID}\" \\\n          --user=\"${SERVICE_OS_USERID}\" \\\n          --user-domain=\"${SERVICE_OS_DOMAIN_ID}\" \\\n          \"${SERVICE_OS_ROLE_ID}\"\n\n# Display user role assignment\nopenstack role assignment list \\\n          --role=\"${SERVICE_OS_ROLE_ID}\" \\\n          --user-domain=\"${SERVICE_OS_DOMAIN_ID}\" \\\n          --user=\"${SERVICE_OS_USERID}\"\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/scripts/_ks-endpoints.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.scripts.keystone_endpoints\" }}\n#!/bin/bash\n\n# Copyright 2017 Pete Birley\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\n\n# Get Service ID\nOS_SERVICE_ID=$( openstack service list -f csv --quote none | \\\n                  grep \",${OS_SERVICE_NAME},${OS_SERVICE_TYPE}$\" | \\\n                    sed -e \"s/,${OS_SERVICE_NAME},${OS_SERVICE_TYPE}//g\" )\n\n# Get Endpoint ID if it exists\nOS_ENDPOINT_ID=$( openstack endpoint list  -f csv --quote none | \\\n                  grep \"^[a-z0-9]*,${OS_REGION_NAME},${OS_SERVICE_NAME},${OS_SERVICE_TYPE},True,${OS_SVC_ENDPOINT},\" | \\\n                  awk -F ',' '{ print $1 }' )\n\n# Making sure only a single endpoint exists for a service within a region\nif [ \"$(echo $OS_ENDPOINT_ID | wc -w)\" -gt \"1\" ]; then\n  echo \"More than one endpoint found, cleaning up\"\n  for ENDPOINT_ID in $OS_ENDPOINT_ID; do\n    openstack 
endpoint delete ${ENDPOINT_ID}\n  done\n  unset OS_ENDPOINT_ID\nfi\n\n# Determine if Endpoint needs updated\nif [[ ${OS_ENDPOINT_ID} ]]; then\n  OS_ENDPOINT_URL_CURRENT=$(openstack endpoint show ${OS_ENDPOINT_ID} -f value -c url)\n  if [ \"${OS_ENDPOINT_URL_CURRENT}\" == \"${OS_SERVICE_ENDPOINT}\" ]; then\n    echo \"Endpoints Match: no action required\"\n    OS_ENDPOINT_UPDATE=\"False\"\n  else\n    echo \"Endpoints Dont Match: removing existing entries\"\n    openstack endpoint delete ${OS_ENDPOINT_ID}\n    OS_ENDPOINT_UPDATE=\"True\"\n  fi\nelse\n  OS_ENDPOINT_UPDATE=\"True\"\nfi\n\n# Update Endpoint if required\nif [[ \"${OS_ENDPOINT_UPDATE}\" == \"True\" ]]; then\n  OS_ENDPOINT_ID=$( openstack endpoint create -f value -c id \\\n    --region=\"${OS_REGION_NAME}\" \\\n    \"${OS_SERVICE_ID}\" \\\n    ${OS_SVC_ENDPOINT} \\\n    \"${OS_SERVICE_ENDPOINT}\" )\nfi\n\n# Display the Endpoint\nopenstack endpoint show ${OS_ENDPOINT_ID}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/scripts/_ks-service.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.scripts.keystone_service\" }}\n#!/bin/bash\n\n# Copyright 2017 Pete Birley\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\n\n# Service boilerplate description\nOS_SERVICE_DESC=\"${OS_REGION_NAME}: ${OS_SERVICE_NAME} (${OS_SERVICE_TYPE}) service\"\n\n# Get Service ID if it exists\nunset OS_SERVICE_ID\n\n# FIXME - There seems to be an issue once in a while where the\n# openstack service list fails and encounters an error message such as:\n#   Unable to establish connection to\n#   https://keystone-api.openstack.svc.cluster.local:5000/v3/auth/tokens:\n#   ('Connection aborted.', OSError(\"(104, 'ECONNRESET')\",))\n# During an upgrade scenario, this would cause the OS_SERVICE_ID to be blank\n# and it would attempt to create a new service when it was not needed.\n# This duplciate service would sometimes be used by other services such as\n# Horizon and would give an 'Invalid 
Service Catalog' error.\n# This loop allows for a 'retry' of the openstack service list in an\n# attempt to get the service list as expected if it does ecounter an error.\n# This loop and recheck can be reverted once the underlying issue is addressed.\n\n# If OS_SERVICE_ID is blank then wait a few seconds to give it\n# additional time and try again\nfor i in $(seq 3)\ndo\n  OS_SERVICE_ID=$( openstack service list -f csv --quote none | \\\n                   grep \",${OS_SERVICE_NAME},${OS_SERVICE_TYPE}$\" | \\\n                   sed -e \"s/,${OS_SERVICE_NAME},${OS_SERVICE_TYPE}//g\" )\n\n  # If the service was found, go ahead and exit successfully.\n  if [[ -n \"${OS_SERVICE_ID}\" ]]; then\n    exit 0\n  fi\n\n  sleep 2\ndone\n\n# If we've reached this point and a Service ID was not found,\n# then create the service\nOS_SERVICE_ID=$(openstack service create -f value -c id \\\n                --name=\"${OS_SERVICE_NAME}\" \\\n                --description \"${OS_SERVICE_DESC}\" \\\n                --enable \\\n                \"${OS_SERVICE_TYPE}\")\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/scripts/_ks-user.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.scripts.keystone_user\" }}\n#!/bin/bash\n\n# Copyright 2017 Pete Birley\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\n\nshopt -s nocasematch\n\nif [[ \"${SERVICE_OS_PROJECT_DOMAIN_NAME}\" == \"Default\" ]]\nthen\n  PROJECT_DOMAIN_ID=\"default\"\nelse\n  # Manage project domain\n  PROJECT_DOMAIN_ID=$(openstack domain create --or-show --enable -f value -c id \\\n    --description=\"Domain for ${SERVICE_OS_PROJECT_DOMAIN_NAME}\" \\\n    \"${SERVICE_OS_PROJECT_DOMAIN_NAME}\")\nfi\n\nif [[ \"${SERVICE_OS_USER_DOMAIN_NAME}\" == \"Default\" ]]\nthen\n  USER_DOMAIN_ID=\"default\"\nelse\n  # Manage user domain\n  USER_DOMAIN_ID=$(openstack domain create --or-show --enable -f value -c id \\\n    --description=\"Domain for ${SERVICE_OS_USER_DOMAIN_NAME}\" \\\n    \"${SERVICE_OS_USER_DOMAIN_NAME}\")\nfi\n\nshopt -u nocasematch\n\n# Manage user project\nUSER_PROJECT_DESC=\"Service 
Project for ${SERVICE_OS_PROJECT_DOMAIN_NAME}\"\nUSER_PROJECT_ID=$(openstack project create --or-show --enable -f value -c id \\\n    --domain=\"${PROJECT_DOMAIN_ID}\" \\\n    --description=\"${USER_PROJECT_DESC}\" \\\n    \"${SERVICE_OS_PROJECT_NAME}\");\n\n# Manage user\nUSER_DESC=\"Service User for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_USER_DOMAIN_NAME}/${SERVICE_OS_SERVICE_NAME}\"\nUSER_ID=$(openstack user create --or-show --enable -f value -c id \\\n    --domain=\"${USER_DOMAIN_ID}\" \\\n    --project-domain=\"${PROJECT_DOMAIN_ID}\" \\\n    --project=\"${USER_PROJECT_ID}\" \\\n    --description=\"${USER_DESC}\" \\\n    \"${SERVICE_OS_USERNAME}\");\n\n# Manage user password (we do this in a seperate step to ensure the password is updated if required)\nset +x\necho \"Setting user password via: openstack user set --password=xxxxxxx ${USER_ID}\"\nopenstack user set --password=\"${SERVICE_OS_PASSWORD}\" \"${USER_ID}\"\nset -x\n\nfunction ks_assign_user_role () {\n  if [[ \"$SERVICE_OS_ROLE\" == \"admin\" ]]\n  then\n    USER_ROLE_ID=\"$SERVICE_OS_ROLE\"\n  else\n    USER_ROLE_ID=$(openstack role create --or-show -f value -c id \"${SERVICE_OS_ROLE}\");\n  fi\n\n  # Manage user role assignment\n  openstack role add \\\n      --user=\"${USER_ID}\" \\\n      --user-domain=\"${USER_DOMAIN_ID}\" \\\n      --project-domain=\"${PROJECT_DOMAIN_ID}\" \\\n      --project=\"${USER_PROJECT_ID}\" \\\n      \"${USER_ROLE_ID}\"\n}\n\n# Manage user service role\nIFS=','\nfor SERVICE_OS_ROLE in ${SERVICE_OS_ROLES}; do\n  ks_assign_user_role\ndone\n\n# Manage user member role\n: ${MEMBER_OS_ROLE:=\"member\"}\nexport USER_ROLE_ID=$(openstack role create --or-show -f value -c id \\\n    \"${MEMBER_OS_ROLE}\");\nks_assign_user_role\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/scripts/_rabbit-init.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.scripts.rabbit_init\" }}\n#!/bin/bash\nset -e\n# Extract connection details\nRABBIT_HOSTNAME=$(echo \"${RABBITMQ_ADMIN_CONNECTION}\" | \\\n  awk -F'[@]' '{print $2}' | \\\n  awk -F'[:/]' '{print $1}')\nRABBIT_PORT=$(echo \"${RABBITMQ_ADMIN_CONNECTION}\" | \\\n  awk -F'[@]' '{print $2}' | \\\n  awk -F'[:/]' '{print $2}')\n\n# Extract Admin User creadential\nRABBITMQ_ADMIN_USERNAME=$(echo \"${RABBITMQ_ADMIN_CONNECTION}\" | \\\n  awk -F'[@]' '{print $1}' | \\\n  awk -F'[//:]' '{print $4}')\nRABBITMQ_ADMIN_PASSWORD=$(echo \"${RABBITMQ_ADMIN_CONNECTION}\" | \\\n  awk -F'[@]' '{print $1}' | \\\n  awk -F'[//:]' '{print $5}' | \\\n  sed 's/%/\\\\x/g' | \\\n  xargs -0 printf \"%b\")\n\n# Extract User creadential\nRABBITMQ_USERNAME=$(echo \"${RABBITMQ_USER_CONNECTION}\" | \\\n  awk -F'[@]' '{print $1}' | \\\n  awk -F'[//:]' '{print $4}')\nRABBITMQ_PASSWORD=$(echo \"${RABBITMQ_USER_CONNECTION}\" | \\\n  awk -F'[@]' '{print $1}' | \\\n  awk -F'[//:]' '{print $5}' | \\\n  sed 's/%/\\\\x/g' | \\\n  xargs -0 printf \"%b\")\n\n# Extract User vHost\nRABBITMQ_VHOST=$(echo \"${RABBITMQ_USER_CONNECTION}\" | \\\n  awk -F'[@]' '{print $2}' | \\\n  awk -F'[:/]' '{print $3}')\n# Resolve vHost to / if no value is set\nRABBITMQ_VHOST=\"${RABBITMQ_VHOST:-/}\"\n\nfunction rabbitmqadmin_cli () {\n  if [ -n \"$RABBITMQ_X509\" ]\n  then\n    rabbitmqadmin \\\n      --ssl \\\n      
--ssl-disable-hostname-verification \\\n      --ssl-ca-cert-file=\"${USER_CERT_PATH}/ca.crt\" \\\n      --ssl-cert-file=\"${USER_CERT_PATH}/tls.crt\" \\\n      --ssl-key-file=\"${USER_CERT_PATH}/tls.key\" \\\n      --host=\"${RABBIT_HOSTNAME}\" \\\n      --port=\"${RABBIT_PORT}\" \\\n      --username=\"${RABBITMQ_ADMIN_USERNAME}\" \\\n      --password=\"${RABBITMQ_ADMIN_PASSWORD}\" \\\n      ${@}\n  else\n    rabbitmqadmin \\\n      --host=\"${RABBIT_HOSTNAME}\" \\\n      --port=\"${RABBIT_PORT}\" \\\n      --username=\"${RABBITMQ_ADMIN_USERNAME}\" \\\n      --password=\"${RABBITMQ_ADMIN_PASSWORD}\" \\\n      ${@}\n  fi\n}\n\necho \"Managing: User: ${RABBITMQ_USERNAME}\"\nrabbitmqadmin_cli \\\n  declare user \\\n  name=\"${RABBITMQ_USERNAME}\" \\\n  password=\"${RABBITMQ_PASSWORD}\" \\\n  tags=\"user\"\n\necho \"Deleting Guest User\"\nrabbitmqadmin_cli \\\n  delete user \\\n  name=\"guest\" || true\n\nif [ \"${RABBITMQ_VHOST}\" != \"/\" ]\nthen\n  echo \"Managing: vHost: ${RABBITMQ_VHOST}\"\n  rabbitmqadmin_cli \\\n    declare vhost \\\n    name=\"${RABBITMQ_VHOST}\"\nelse\n  echo \"Skipping root vHost declaration: vHost: ${RABBITMQ_VHOST}\"\nfi\n\necho \"Managing: Permissions: ${RABBITMQ_USERNAME} on ${RABBITMQ_VHOST}\"\nrabbitmqadmin_cli \\\n  declare permission \\\n  vhost=\"${RABBITMQ_VHOST}\" \\\n  user=\"${RABBITMQ_USERNAME}\" \\\n  configure=\".*\" \\\n  write=\".*\" \\\n  read=\".*\"\n\nif [ ! -z \"$RABBITMQ_AUXILIARY_CONFIGURATION\" ]\nthen\n  echo \"Applying additional configuration\"\n  echo \"${RABBITMQ_AUXILIARY_CONFIGURATION}\" > /tmp/rmq_definitions.json\n  rabbitmqadmin_cli import /tmp/rmq_definitions.json\nfi\n\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/scripts/_rally_test.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.scripts.rally_test\" -}}\n#!/bin/bash\nset -ex\n{{- $rallyTests := index . 0 }}\n\n: \"${RALLY_ENV_NAME:=\"openstack-helm\"}\"\n: \"${OS_INTERFACE:=\"public\"}\"\n: \"${RALLY_CLEANUP:=\"true\"}\"\n\nif [ \"x$RALLY_CLEANUP\" == \"xtrue\" ]; then\n  function rally_cleanup {\n    openstack user delete \\\n        --domain=\"${SERVICE_OS_USER_DOMAIN_NAME}\" \\\n        \"${SERVICE_OS_USERNAME}\"\n{{ $rallyTests.clean_up | default \"\" | indent 4 }}\n  }\n  trap rally_cleanup EXIT\nfi\n\nfunction create_or_update_db () {\n  revisionResults=$(rally db revision)\n  if [ $revisionResults = \"None\"  ]\n  then\n    rally db create\n  else\n    rally db upgrade\n  fi\n}\n\ncreate_or_update_db\n\ncat > /tmp/rally-config.json << EOF\n{\n    \"openstack\": {\n        \"auth_url\": \"${OS_AUTH_URL}\",\n        \"region_name\": \"${OS_REGION_NAME}\",\n        \"endpoint_type\": \"${OS_INTERFACE}\",\n        \"admin\": {\n            \"username\": \"${OS_USERNAME}\",\n            \"password\": \"${OS_PASSWORD}\",\n            \"user_domain_name\": \"${OS_USER_DOMAIN_NAME}\",\n            \"project_name\": \"${OS_PROJECT_NAME}\",\n            \"project_domain_name\": \"${OS_PROJECT_DOMAIN_NAME}\"\n        },\n        \"users\": [\n            {\n                \"username\": \"${SERVICE_OS_USERNAME}\",\n                \"password\": \"${SERVICE_OS_PASSWORD}\",\n                \"project_name\": 
\"${SERVICE_OS_PROJECT_NAME}\",\n                \"user_domain_name\": \"${SERVICE_OS_USER_DOMAIN_NAME}\",\n                \"project_domain_name\": \"${SERVICE_OS_PROJECT_DOMAIN_NAME}\"\n            }\n        ],\n        \"https_insecure\": false,\n        \"https_cacert\": \"${OS_CACERT}\"\n    }\n}\nEOF\nrally deployment create --file /tmp/rally-config.json --name \"${RALLY_ENV_NAME}\"\nrm -f /tmp/rally-config.json\nrally deployment use \"${RALLY_ENV_NAME}\"\nrally deployment check\n{{- if $rallyTests.run_tempest }}\nrally verify create-verifier --name \"${RALLY_ENV_NAME}-tempest\" --type tempest\nSERVICE_TYPE=\"$(rally deployment check | grep \"${RALLY_ENV_NAME}\" | awk -F \\| '{print $3}' | tr -d ' ' | tr -d '\\n')\"\nrally verify start --pattern \"tempest.api.${SERVICE_TYPE}*\"\nrally verify delete-verifier --id \"${RALLY_ENV_NAME}-tempest\" --force\n{{- end }}\nrally task validate /etc/rally/rally_tests.yaml\nrally task start /etc/rally/rally_tests.yaml\nrally task sla-check\nrally env cleanup\nrally deployment destroy --deployment \"${RALLY_ENV_NAME}\"\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl",
    "content": "{{- define \"helm-toolkit.scripts.db-backup-restore.backup_main\" }}\n#!/bin/bash\n\n# This file contains a database backup framework which database scripts\n# can use to perform a backup. The idea here is that the database-specific\n# functions will be implemented by the various databases using this script\n# (like mariadb, postgresql or etcd for example). The database-specific\n# script will need to first \"source\" this file like this:\n#   source /tmp/backup_main.sh\n#\n# Then the script should call the main backup function (backup_databases):\n#   backup_databases [scope]\n#       [scope] is an optional parameter, defaulted to \"all\". If only one specific\n#               database is required to be backed up then this parameter will\n#               contain the name of the database; otherwise all are backed up.\n#\n#       The framework will require the following variables to be exported:\n#\n#         export DB_NAMESPACE          Namespace where the database(s) reside\n#         export DB_NAME               Name of the database system\n#         export LOCAL_DAYS_TO_KEEP    Number of days to keep the local backups\n#         export REMOTE_DAYS_TO_KEEP   Number of days to keep the remote backups\n#         export ARCHIVE_DIR           Local location where the backup tarballs should\n#                                      be stored. 
(full directory path)\n#         export BACK_UP_MODE          Determines the mode of backup taken.\n#         export REMOTE_BACKUP_ENABLED \"true\" if remote backup enabled; false\n#                                      otherwise\n#         export CONTAINER_NAME        Name of the container on the RGW to store\n#                                      the backup tarball.\n#         export STORAGE_POLICY        Name of the storage policy defined on the\n#                                      RGW which is intended to store backups.\n#         RGW access variables:\n#           export OS_REGION_NAME          Name of the region the RGW resides in\n#           export OS_AUTH_URL             Keystone URL associated with the RGW\n#           export OS_PROJECT_NAME         Name of the project associated with the\n#                                          keystone user\n#           export OS_USERNAME             Name of the keystone user\n#           export OS_PASSWORD             Password of the keystone user\n#           export OS_USER_DOMAIN_NAME     Keystone domain the project belongs to\n#           export OS_PROJECT_DOMAIN_NAME  Keystone domain the user belongs to\n#           export OS_IDENTITY_API_VERSION Keystone API version to use\n#\n#           export REMOTE_BACKUP_RETRIES   Number of retries to send backup to remote\n#                                          in case of any temporary failures.\n#           export MIN_DELAY_SEND_REMOTE   Minimum seconds to delay before sending backup\n#                                          to remote to stagger backups being sent to RGW\n#           export MAX_DELAY_SEND_REMOTE   Maximum seconds to delay before sending backup\n#                                          to remote to stagger backups being sent to RGW.\n#                                          A random number between min and max delay is generated\n#                                          to set the delay.\n#\n#         RGW backup throttle limits 
variables:\n#           export THROTTLE_BACKUPS_ENABLED   Boolean variable to control backup functionality\n#           export THROTTLE_LIMIT             Number of simultaneous RGW upload sessions\n#           export THROTTLE_LOCK_EXPIRE_AFTER Time in seconds to expire flag file if orphaned\n#           export THROTTLE_RETRY_AFTER       Time in seconds to wait before retry\n#           export THROTTLE_CONTAINER_NAME    Name of RGW container to place flag files into\n#\n# The database-specific functions that need to be implemented are:\n#   dump_databases_to_directory <directory> <err_logfile> [scope]\n#       where:\n#         <directory>   is the full directory path to dump the database files\n#                       into. This is a temporary directory for this backup only.\n#         <err_logfile> is the full directory path where error logs are to be\n#                       written by the application.\n#         [scope]       set to \"all\" if all databases are to be backed up; or\n#                       set to the name of a specific database to be backed up.\n#                       This optional parameter is defaulted to \"all\".\n#       returns: 0 if no errors; 1 if any errors occurred\n#\n#       This function is expected to dump the database file(s) to the specified\n#       directory path. If this function completes successfully (returns 0), the\n#       framework will automatically tar/zip the files in that directory and\n#       name the tarball appropriately according to the proper conventions.\n#\n#   verify_databases_backup_archives [scope]\n#       returns: 0 if no errors; 1 if any errors occurred\n#\n#       This function is expected to verify the database backup archives. 
If this function\n#        completes successfully (returns 0), the\n#       framework will automatically start remote backup upload.\n#\n#\n# The functions in this file will take care of:\n#   1) Calling \"dump_databases_to_directory\" and then compressing the files,\n#      naming the tarball properly, and then storing it locally at the specified\n#      local directory.\n#   2) Sending the tarball built to the remote gateway, to be stored in the\n#      container configured to store database backups.\n#   3) Removing local backup tarballs which are older than the number of days\n#      specified by the \"LOCAL_DAYS_TO_KEEP\" variable.\n#   4) Removing remote backup tarballs (from the remote gateway) which are older\n#      than the number of days specified by the \"REMOTE_DAYS_TO_KEEP\" variable.\n#   5) Controlling remote storage gateway load from client side and throttling it\n#      by using a dedicated RGW container to store flag files defining upload session\n#      in progress\n#\n# Note: not using set -e in this script because more elaborate error handling\n# is needed.\n\nlog_backup_error_exit() {\n  MSG=$1\n  ERRCODE=${2:-0}\n  log ERROR \"${DB_NAME}_backup\" \"${DB_NAMESPACE} namespace: ${MSG}\"\n  rm -f $ERR_LOG_FILE\n  rm -rf $TMP_DIR\n  exit 0\n}\n\nlog_verify_backup_exit() {\n  MSG=$1\n  ERRCODE=${2:-0}\n  log ERROR \"${DB_NAME}_verify_backup\" \"${DB_NAMESPACE} namespace: ${MSG}\"\n  rm -f $ERR_LOG_FILE\n  # rm -rf $TMP_DIR\n  exit 0\n}\n\n\nlog() {\n  #Log message to a file or stdout\n  #TODO: This could be converted into a mail alert or an alert sent to a monitoring system\n  #Params: $1 log level\n  #Params: $2 service\n  #Params: $3 message\n  #Params: $4 Destination\n  LEVEL=$1\n  SERVICE=$2\n  MSG=$3\n  DEST=$4\n  DATE=$(date +\"%m-%d-%y %H:%M:%S\")\n  if [[ -z \"$DEST\" ]]; then\n    echo \"${DATE} ${LEVEL}: $(hostname) ${SERVICE}: ${MSG}\"\n  else\n    echo \"${DATE} ${LEVEL}: $(hostname) ${SERVICE}: ${MSG}\" >>$DEST\n  fi\n}\n\n# Generate a random 
number between MIN_DELAY_SEND_REMOTE and\n# MAX_DELAY_SEND_REMOTE\nrandom_number() {\n  diff=$((${MAX_DELAY_SEND_REMOTE} - ${MIN_DELAY_SEND_REMOTE} + 1))\n  echo $(($(( ${RANDOM} % ${diff} )) + ${MIN_DELAY_SEND_REMOTE} ))\n}\n\n#Get the day delta since the archive file backup\nseconds_difference() {\n  ARCHIVE_DATE=$( date --date=\"$1\" +%s )\n  if [[ $? -ne 0 ]]; then\n    SECOND_DELTA=0\n  fi\n  CURRENT_DATE=$( date +%s )\n  SECOND_DELTA=$(($CURRENT_DATE-$ARCHIVE_DATE))\n  if [[ \"$SECOND_DELTA\" -lt 0 ]]; then\n    SECOND_DELTA=0\n  fi\n  echo $SECOND_DELTA\n}\n\n# Send the specified tarball file at the specified filepath to the\n# remote gateway.\nsend_to_remote_server() {\n  FILEPATH=$1\n  FILE=$2\n\n  # Grab the list of containers on the remote site\n  RESULT=$(openstack container list 2>&1)\n\n  if [[ $? -eq 0 ]]; then\n    printf \"%s\\n\" \"$RESULT\" | grep \"$CONTAINER_NAME\"\n    if [[ $? -ne 0 ]]; then\n      # Find the swift URL from the keystone endpoint list\n      SWIFT_URL=$(openstack catalog show object-store -c endpoints | grep public | awk '{print $4}')\n      if [[ $? -ne 0 ]]; then\n        log WARN \"${DB_NAME}_backup\" \"Unable to get object-store enpoints from keystone catalog.\"\n        return 2\n      fi\n\n      # Get a token from keystone\n      TOKEN=$(openstack token issue -f value -c id)\n      if [[ $? -ne 0 ]]; then\n        log WARN \"${DB_NAME}_backup\" \"Unable to get  keystone token.\"\n        return 2\n      fi\n\n      # Create the container\n      RES_FILE=$(mktemp -p /tmp)\n      curl -g -i -X PUT ${SWIFT_URL}/${CONTAINER_NAME} \\\n           -H \"X-Auth-Token: ${TOKEN}\" \\\n           -H \"X-Storage-Policy: ${STORAGE_POLICY}\" 2>&1 > $RES_FILE\n\n      if [[ $? 
-ne 0 || $(grep \"HTTP\" $RES_FILE | awk '{print $2}') -ge 400 ]]; then\n        log WARN \"${DB_NAME}_backup\" \"Unable to create container ${CONTAINER_NAME}\"\n        cat $RES_FILE\n        rm -f $RES_FILE\n        return 2\n      fi\n      rm -f $RES_FILE\n\n      swift stat $CONTAINER_NAME\n      if [[ $? -ne 0 ]]; then\n        log WARN \"${DB_NAME}_backup\" \"Unable to retrieve container ${CONTAINER_NAME} details after creation.\"\n        return 2\n      fi\n    fi\n  else\n    echo $RESULT | grep -E \"HTTP 401|HTTP 403\"\n    if [[ $? -eq 0 ]]; then\n      log ERROR \"${DB_NAME}_backup\" \"Access denied by keystone: ${RESULT}\"\n      return 2\n    else\n      echo $RESULT | grep -E \"ConnectionError|Failed to discover available identity versions|Service Unavailable|HTTP 50\"\n      if [[ $? -eq 0 ]]; then\n        log WARN \"${DB_NAME}_backup\" \"Could not reach the RGW: ${RESULT}\"\n        # In this case, keystone or the site/node may be temporarily down.\n        # Return slightly different error code so the calling code can retry\n        return 2\n      else\n        log ERROR \"${DB_NAME}_backup\" \"Could not get container list: ${RESULT}\"\n        return 2\n      fi\n    fi\n  fi\n\n  # load balance delay\n  DELAY=$((1 + ${RANDOM} % 30))\n  echo \"Sleeping for ${DELAY} seconds to spread the load in time...\"\n  sleep ${DELAY}\n\n  #---------------------------------------------------------------------------\n  # Remote backup throttling\n  export THROTTLE_BACKUPS_ENABLED=$(echo $THROTTLE_BACKUPS_ENABLED | sed 's/\"//g')\n  if $THROTTLE_BACKUPS_ENABLED; then\n    # Remove Quotes from the constants which were added due to reading\n    # from secret.\n    export THROTTLE_LIMIT=$(echo $THROTTLE_LIMIT | sed 's/\"//g')\n    export THROTTLE_LOCK_EXPIRE_AFTER=$(echo $THROTTLE_LOCK_EXPIRE_AFTER | sed 's/\"//g')\n    export THROTTLE_RETRY_AFTER=$(echo $THROTTLE_RETRY_AFTER | sed 's/\"//g')\n    export THROTTLE_CONTAINER_NAME=$(echo $THROTTLE_CONTAINER_NAME | 
sed 's/\"//g')\n\n    # load balance delay\n    RESULT=$(openstack container list 2>&1)\n\n    if [[ $? -eq 0 ]]; then\n      printf \"%s\\n\" \"$RESULT\" | grep \"$THROTTLE_CONTAINER_NAME\"\n      if [[ $? -ne 0 ]]; then\n        # Find the swift URL from the keystone endpoint list\n        SWIFT_URL=$(openstack catalog show object-store -c endpoints | grep public | awk '{print $4}')\n        if [[ $? -ne 0 ]]; then\n          log WARN \"${DB_NAME}_backup\" \"Unable to get object-store enpoints from keystone catalog.\"\n          return 2\n        fi\n\n        # Get a token from keystone\n        TOKEN=$(openstack token issue -f value -c id)\n        if [[ $? -ne 0 ]]; then\n          log WARN \"${DB_NAME}_backup\" \"Unable to get  keystone token.\"\n          return 2\n        fi\n\n        # Create the container\n        RES_FILE=$(mktemp -p /tmp)\n        curl -g -i -X PUT ${SWIFT_URL}/${THROTTLE_CONTAINER_NAME} \\\n            -H \"X-Auth-Token: ${TOKEN}\" \\\n            -H \"X-Storage-Policy: ${STORAGE_POLICY}\" 2>&1 > $RES_FILE\n\n        if [[ $? -ne 0 || $(grep \"HTTP\" $RES_FILE | awk '{print $2}') -ge 400 ]]; then\n          log WARN \"${DB_NAME}_backup\" \"Unable to create container ${THROTTLE_CONTAINER_NAME}\"\n          cat $RES_FILE\n          rm -f $RES_FILE\n          return 2\n        fi\n        rm -f $RES_FILE\n\n        swift stat $THROTTLE_CONTAINER_NAME\n        if [[ $? -ne 0 ]]; then\n          log WARN \"${DB_NAME}_backup\" \"Unable to retrieve container ${THROTTLE_CONTAINER_NAME} details after creation.\"\n          return 2\n        fi\n      fi\n    else\n      echo $RESULT | grep -E \"HTTP 401|HTTP 403\"\n      if [[ $? -eq 0 ]]; then\n        log ERROR \"${DB_NAME}_backup\" \"Access denied by keystone: ${RESULT}\"\n        return 2\n      else\n        echo $RESULT | grep -E \"ConnectionError|Failed to discover available identity versions|Service Unavailable|HTTP 50\"\n        if [[ $? 
-eq 0 ]]; then\n          log WARN \"${DB_NAME}_backup\" \"Could not reach the RGW: ${RESULT}\"\n          # In this case, keystone or the site/node may be temporarily down.\n          # Return slightly different error code so the calling code can retry\n          return 2\n        else\n          log ERROR \"${DB_NAME}_backup\" \"Could not get container list: ${RESULT}\"\n          return 2\n        fi\n      fi\n    fi\n\n    NUMBER_OF_SESSIONS=$(openstack object list $THROTTLE_CONTAINER_NAME -f value | wc -l)\n    log INFO  \"${DB_NAME}_backup\"  \"There are ${NUMBER_OF_SESSIONS} remote sessions right now.\"\n    while [[ ${NUMBER_OF_SESSIONS} -ge ${THROTTLE_LIMIT} ]]\n    do\n      log INFO \"${DB_NAME}_backup\" \"Current number of active uploads is ${NUMBER_OF_SESSIONS}>=${THROTTLE_LIMIT}!\"\n      log INFO \"${DB_NAME}_backup\" \"Retrying in ${THROTTLE_RETRY_AFTER} seconds....\"\n      sleep ${THROTTLE_RETRY_AFTER}\n      NUMBER_OF_SESSIONS=$(openstack object list $THROTTLE_CONTAINER_NAME -f value | wc -l)\n      log INFO  \"${DB_NAME}_backup\"  \"There are ${NUMBER_OF_SESSIONS} remote sessions right now.\"\n    done\n\n    # Create a lock file in THROTTLE_CONTAINER\n    THROTTLE_FILEPATH=$(mktemp -d)\n    THROTTLE_FILE=${CONTAINER_NAME}.lock\n    date +%s > $THROTTLE_FILEPATH/$THROTTLE_FILE\n\n    # Create an object to store the file\n    openstack object create --name $THROTTLE_FILE $THROTTLE_CONTAINER_NAME $THROTTLE_FILEPATH/$THROTTLE_FILE\n    if [[ $? -ne 0 ]]; then\n      log WARN \"${DB_NAME}_backup\" \"Cannot create throttle container object ${THROTTLE_FILE}!\"\n      return 2\n    fi\n\n    swift post  $THROTTLE_CONTAINER_NAME $THROTTLE_FILE -H \"X-Delete-After:${THROTTLE_LOCK_EXPIRE_AFTER}\"\n    if [[ $? -ne 0 ]]; then\n      log WARN \"${DB_NAME}_backup\" \"Cannot set throttle container object ${THROTTLE_FILE} expiration header!\"\n      return 2\n    fi\n    openstack object show $THROTTLE_CONTAINER_NAME $THROTTLE_FILE\n    if [[ $? 
-ne 0 ]]; then\n      log WARN \"${DB_NAME}_backup\" \"Unable to retrieve throttle container object $THROTTLE_FILE after creation.\"\n      return 2\n    fi\n  fi\n\n  #---------------------------------------------------------------------------\n\n  # Create an object to store the file\n  openstack object create --name $FILE $CONTAINER_NAME $FILEPATH/$FILE\n  if [[ $? -ne 0 ]]; then\n    log WARN \"${DB_NAME}_backup\" \"Cannot create container object ${FILE}!\"\n    return 2\n  fi\n\n  openstack object show $CONTAINER_NAME $FILE\n  if [[ $? -ne 0 ]]; then\n    log WARN \"${DB_NAME}_backup\" \"Unable to retrieve container object $FILE after creation.\"\n    return 2\n  fi\n\n  # Remote backup verification\n  MD5_REMOTE=$(openstack object show $CONTAINER_NAME $FILE -f json | jq -r \".etag\")\n  MD5_LOCAL=$(cat ${FILEPATH}/${FILE} | md5sum | awk '{print $1}')\n  log INFO \"${DB_NAME}_backup\" \"Obtained MD5 hash for the file $FILE in container $CONTAINER_NAME.\"\n  log INFO \"${DB_NAME}_backup\" \"Local MD5 hash is ${MD5_LOCAL}.\"\n  log INFO \"${DB_NAME}_backup\" \"Remote MD5 hash is ${MD5_REMOTE}.\"\n  if [[ \"${MD5_LOCAL}\" == \"${MD5_REMOTE}\" ]]; then\n      log INFO \"${DB_NAME}_backup\" \"The local backup & remote backup MD5 hash values are matching for file $FILE in container $CONTAINER_NAME.\"\n  else\n      log ERROR \"${DB_NAME}_backup\" \"Mismatch between the local backup & remote backup MD5 hash values\"\n      return 2\n  fi\n  rm -f ${REMOTE_FILE}\n\n  #---------------------------------------------------------------------------\n  # Remote backup throttling\n  export THROTTLE_BACKUPS_ENABLED=$(echo $THROTTLE_BACKUPS_ENABLED | sed 's/\"//g')\n  if $THROTTLE_BACKUPS_ENABLED; then\n    # Remove flag file\n    # Delete an object to remove the flag file\n    openstack object delete $THROTTLE_CONTAINER_NAME $THROTTLE_FILE\n    if [[ $? 
-ne 0 ]]; then\n      log WARN \"${DB_NAME}_backup\" \"Cannot delete throttle container object ${THROTTLE_FILE}\"\n      return 0\n    else\n      log INFO \"${DB_NAME}_backup\" \"The throttle container object ${THROTTLE_FILE} has been successfully removed.\"\n    fi\n    rm -f ${THROTTLE_FILEPATH}/${THROTTLE_FILE}\n  fi\n\n  #---------------------------------------------------------------------------\n\n  log INFO \"${DB_NAME}_backup\" \"Created file $FILE in container $CONTAINER_NAME successfully.\"\n  return 0\n}\n\n# This function attempts to store the built tarball to the remote gateway,\n# with built-in logic to handle error cases like:\n#   1) Network connectivity issues - retries for a specific amount of time\n#   2) Authorization errors - immediately logs an ERROR and returns\nfunction store_backup_remotely() {\n  local FILEPATH=$1\n  local FILE=$2\n  local count=0\n\n  while [[ ${count} -lt ${REMOTE_BACKUP_RETRIES} ]]; do\n    # Store the new archive to the remote backup storage facility.\n    send_to_remote_server $FILEPATH $FILE\n    SEND_RESULT=\"$?\"\n\n    # Check if successful\n    if [[ $SEND_RESULT -eq 0 ]]; then\n      log INFO \"${DB_NAME}_backup\" \"Backup file ${FILE} successfully sent to RGW.\"\n      return 0\n    elif [[ $SEND_RESULT -eq 2 ]]; then\n      if [[ ${count} -ge ${REMOTE_BACKUP_RETRIES} ]]; then\n        log ERROR \"${DB_NAME}_backup\" \"Backup file ${FILE} could not be sent to the RGW in \" \\\n        \"${REMOTE_BACKUP_RETRIES} retries. Errors encountered. Exiting.\"\n        break\n      fi\n      # Temporary failure occurred. 
We need to retry\n      log WARN \"${DB_NAME}_backup\" \"Backup file ${FILE} could not be sent to RGW due to connection issue.\"\n      sleep_time=$(random_number)\n      log INFO \"${DB_NAME}_backup\" \"Sleeping ${sleep_time} seconds waiting for RGW to become available...\"\n      sleep ${sleep_time}\n      log INFO \"${DB_NAME}_backup\" \"Retrying...\"\n    else\n      log ERROR \"${DB_NAME}_backup\" \"Backup file ${FILE} could not be sent to the RGW. Errors encountered. Exiting.\"\n      break\n    fi\n\n    # Increment the counter\n    count=$((count+1))\n  done\n\n  log INFO \"${DB_NAME}_backup\" \"Switching to failover RGW...\"\n\n  # If initial attempts failed and failover variables are defined, retry with failover environment variables\n  if [[ $SEND_RESULT -ne 0 ]] && \\\n    [[ -n \"${OS_AUTH_URL_FAILOVER}\" ]] && \\\n    [[ -n \"${OS_REGION_NAME_FAILOVER}\" ]] && \\\n    [[ -n \"${OS_INTERFACE_FAILOVER}\" ]] && \\\n    [[ -n \"${OS_PROJECT_DOMAIN_NAME_FAILOVER}\" ]] && \\\n    [[ -n \"${OS_PROJECT_NAME_FAILOVER}\" ]] && \\\n    [[ -n \"${OS_USER_DOMAIN_NAME_FAILOVER}\" ]] && \\\n    [[ -n \"${OS_USERNAME_FAILOVER}\" ]] && \\\n    [[ -n \"${OS_PASSWORD_FAILOVER}\" ]]; then\n    log INFO \"${DB_NAME}_backup\" \"Initial attempts failed. 
Retrying with failover environment variables...\"\n\n    # Redefine OS_* variables with OS_*_FAILOVER ones\n    export OS_AUTH_URL=${OS_AUTH_URL_FAILOVER}\n    export OS_REGION_NAME=${OS_REGION_NAME_FAILOVER}\n    export OS_INTERFACE=${OS_INTERFACE_FAILOVER}\n    export OS_PROJECT_DOMAIN_NAME=${OS_PROJECT_DOMAIN_NAME_FAILOVER}\n    export OS_PROJECT_NAME=${OS_PROJECT_NAME_FAILOVER}\n    export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME_FAILOVER}\n    export OS_USERNAME=${OS_USERNAME_FAILOVER}\n    export OS_PASSWORD=${OS_PASSWORD_FAILOVER}\n    export OS_DEFAULT_DOMAIN=${OS_DEFAULT_DOMAIN_FAILOVER}\n\n    count=0\n    while [[ ${count} -lt ${REMOTE_BACKUP_RETRIES} ]]; do\n      # Store the new archive to the remote backup storage facility.\n      send_to_remote_server $FILEPATH $FILE\n      SEND_RESULT=\"$?\"\n\n      # Check if successful\n      if [[ $SEND_RESULT -eq 0 ]]; then\n        log INFO \"${DB_NAME}_backup\" \"Backup file ${FILE} successfully sent to failover RGW.\"\n        return 0\n      elif [[ $SEND_RESULT -eq 2 ]]; then\n        if [[ ${count} -ge ${REMOTE_BACKUP_RETRIES} ]]; then\n          log ERROR \"${DB_NAME}_backup\" \"Backup file ${FILE} could not be sent to the failover RGW in \" \\\n          \"${REMOTE_BACKUP_RETRIES} retries. Errors encountered. Exiting.\"\n          break\n        fi\n        # Temporary failure occurred. We need to retry\n        log WARN \"${DB_NAME}_backup\" \"Backup file ${FILE} could not be sent to failover RGW due to connection issue.\"\n        sleep_time=$(random_number)\n        log INFO \"${DB_NAME}_backup\" \"Sleeping ${sleep_time} seconds waiting for failover RGW to become available...\"\n        sleep ${sleep_time}\n        log INFO \"${DB_NAME}_backup\" \"Retrying...\"\n      else\n        log ERROR \"${DB_NAME}_backup\" \"Backup file ${FILE} could not be sent to the failover RGW. Errors encountered. 
Exiting.\"\n        break\n      fi\n\n      # Increment the counter\n      count=$((count+1))\n    done\n  fi\n\n  return 1\n}\n\nfunction get_archive_date(){\n# get_archive_date function returns correct archive date\n# for different formats of archives' names\n# the old one: <database name>.<namespace>.<table name | all>.<date-time>.tar.gz\n# the new one: <database name>.<namespace>.<table name | all>.<backup mode>.<date-time>.tar.gz\n  local A_FILE=\"$1\"\n  awk -F. '{print $(NF-2)}' <<< ${A_FILE} | tr -d \"Z\"\n}\n\n# This function takes a list of archives' names as an input\n# and creates a hash table where keys are number of seconds\n# between current date and archive date (see seconds_difference),\n# and values are space separated archives' names\n#\n# +------------+---------------------------------------------------------------------------------------------------------+\n# | 1265342678 | \"tmp/mysql.backup.auto.2022-02-14T10:13:13Z.tar.gz\"                                                     |\n# +------------+---------------------------------------------------------------------------------------------------------+\n# | 2346254257 | \"tmp/mysql.backup.auto.2022-02-11T10:13:13Z.tar.gz tmp/mysql.backup.manual.2022-02-11T10:13:13Z.tar.gz\" |\n# +------------+---------------------------------------------------------------------------------------------------------+\n# <...>\n# +------------+---------------------------------------------------------------------------------------------------------+\n# | 6253434567 | \"tmp/mysql.backup.manual.2022-02-01T10:13:13Z.tar.gz\"                                                   |\n# +------------+---------------------------------------------------------------------------------------------------------+\n# We will use the explained above data stracture to cover rare, but still\n# possible case, when we have several backups of the same date. 
E.g.\n# one manual, and one automatic.\n\ndeclare -A fileTable\ncreate_hash_table() {\nunset fileTable\nfileList=$@\n  for ARCHIVE_FILE in ${fileList}; do\n    # Creating index, we will round given ARCHIVE_DATE to the midnight (00:00:00)\n    # to take in account a possibility, that we can have more than one scheduled\n    # backup per day.\n    ARCHIVE_DATE=$(get_archive_date ${ARCHIVE_FILE})\n    ARCHIVE_DATE=$(date --date=${ARCHIVE_DATE} +%D)\n    log INFO \"${DB_NAME}_backup\" \"Archive date to build index: ${ARCHIVE_DATE}\"\n    INDEX=$(seconds_difference ${ARCHIVE_DATE})\n    if [[ -z fileTable[${INDEX}] ]]; then\n      fileTable[${INDEX}]=${ARCHIVE_FILE}\n    else\n      fileTable[${INDEX}]=\"${fileTable[${INDEX}]} ${ARCHIVE_FILE}\"\n    fi\n    echo \"INDEX: ${INDEX} VALUE:  ${fileTable[${INDEX}]}\"\n done\n}\n\nfunction get_backup_prefix() {\n# Create list of all possible prefixes in a format:\n# <db_name>.<namespace> to cover a possible situation\n# when different backups of different databases and/or\n# namespaces share the same local or remote storage.\n  ALL_FILES=($@)\n  PREFIXES=()\n  for fname in ${ALL_FILES[@]}; do\n    prefix=$(basename ${fname} | cut -d'.' -f1,2 )\n    for ((i=0; i<${#PREFIXES[@]}; i++)) do\n      if [[ ${PREFIXES[${i}]} == ${prefix} ]]; then\n        prefix=\"\"\n        break\n      fi\n    done\n    if [[ ! -z ${prefix} ]]; then\n        PREFIXES+=(${prefix})\n    fi\n  done\n}\n\nremove_old_local_archives() {\n  SECONDS_TO_KEEP=$(( $((${LOCAL_DAYS_TO_KEEP}))*86400))\n  log INFO \"${DB_NAME}_backup\" \"Deleting backups older than ${LOCAL_DAYS_TO_KEEP} days (${SECONDS_TO_KEEP} seconds)\"\n  if [[ -d $ARCHIVE_DIR ]]; then\n    count=0\n    # We iterate over the hash table, checking the delta in seconds (hash keys),\n    # and minimum number of backups we must have in place. 
List of keys has to be sorted.\n    for INDEX in $(tr \" \" \"\\n\" <<< ${!fileTable[@]} | sort -n -); do\n      ARCHIVE_FILE=${fileTable[${INDEX}]}\n      if [[ ${INDEX} -lt ${SECONDS_TO_KEEP} || ${count} -lt ${LOCAL_DAYS_TO_KEEP} ]]; then\n        ((count++))\n        log INFO \"${DB_NAME}_backup\" \"Keeping file(s) ${ARCHIVE_FILE}.\"\n      else\n        log INFO \"${DB_NAME}_backup\" \"Deleting file(s) ${ARCHIVE_FILE}.\"\n          rm -f ${ARCHIVE_FILE}\n          if [[ $? -ne 0 ]]; then\n            # Log error but don't exit so we can finish the script\n            # because at this point we haven't sent backup to RGW yet\n            log ERROR \"${DB_NAME}_backup\" \"Failed to cleanup local backup. Cannot remove some of ${ARCHIVE_FILE}\"\n          fi\n      fi\n    done\n  else\n    log WARN \"${DB_NAME}_backup\" \"The local backup directory ${ARCHIVE_DIR} does not exist.\"\n  fi\n}\n\nprepare_list_of_remote_backups() {\n  BACKUP_FILES=$(mktemp -p /tmp)\n  DB_BACKUP_FILES=$(mktemp -p /tmp)\n  openstack object list $CONTAINER_NAME > $BACKUP_FILES\n  if [[ $? -ne 0 ]]; then\n    log_backup_error_exit \\\n      \"Failed to cleanup remote backup. 
Could not obtain a list of current backup files in the RGW\"\n  fi\n  # Filter out other types of backup files\n  cat $BACKUP_FILES | grep $DB_NAME | grep $DB_NAMESPACE | awk '{print $2}' > $DB_BACKUP_FILES\n}\n\n# The logic implemented with this function is absolutely similar\n# to the function remove_old_local_archives (see above)\nremove_old_remote_archives() {\n  count=0\n  SECONDS_TO_KEEP=$((${REMOTE_DAYS_TO_KEEP}*86400))\n  log INFO \"${DB_NAME}_backup\" \"Deleting backups older than ${REMOTE_DAYS_TO_KEEP} days (${SECONDS_TO_KEEP} seconds)\"\n  for INDEX in $(tr \" \" \"\\n\" <<< ${!fileTable[@]} | sort -n -); do\n    ARCHIVE_FILE=${fileTable[${INDEX}]}\n    if [[ ${INDEX} -lt ${SECONDS_TO_KEEP} || ${count} -lt ${REMOTE_DAYS_TO_KEEP} ]]; then\n      ((count++))\n      log INFO \"${DB_NAME}_backup\" \"Keeping remote backup(s) ${ARCHIVE_FILE}.\"\n    else\n      log INFO \"${DB_NAME}_backup\" \"Deleting remote backup(s) ${ARCHIVE_FILE} from the RGW\"\n      openstack object delete ${CONTAINER_NAME} ${ARCHIVE_FILE} ||  log WARN \"${DB_NAME}_backup\" \\\n        \"Failed to cleanup remote backup. Cannot delete container object ${ARCHIVE_FILE}\"\n    fi\n  done\n\n  # Cleanup now that we're done.\n  for fd in ${BACKUP_FILES} ${DB_BACKUP_FILES}; do\n    if [[ -f ${fd} ]]; then\n      rm -f ${fd}\n    else\n      log WARN \"${DB_NAME}_backup\" \"Can not delete a temporary file ${fd}\"\n    fi\n  done\n}\n\n# Main function to backup the databases. Calling functions need to supply:\n#  1) The directory where the final backup will be kept after it is compressed.\n#  2) A temporary directory to use for placing database files to be compressed.\n#     Note: this temp directory will be deleted after backup is done.\n#  3) Optional \"scope\" parameter indicating what database to back up. 
Defaults\n#     to \"all\".\nbackup_databases() {\n  SCOPE=${1:-\"all\"}\n\n  # Create necessary directories if they do not exist.\n  mkdir -p $ARCHIVE_DIR || log_backup_error_exit \\\n    \"Backup of the ${DB_NAME} database failed. Cannot create directory ${ARCHIVE_DIR}!\"\n  export TMP_DIR=$(mktemp -d) || log_backup_error_exit \\\n    \"Backup of the ${DB_NAME} database failed. Cannot create temp directory!\"\n\n  # Create temporary log file\n  export ERR_LOG_FILE=$(mktemp -p /tmp) || log_backup_error_exit \\\n    \"Backup of the ${DB_NAME} database failed. Cannot create log file!\"\n\n  # It is expected that this function will dump the database files to the $TMP_DIR\n  dump_databases_to_directory $TMP_DIR $ERR_LOG_FILE $SCOPE\n\n  # If successful, there should be at least one file in the TMP_DIR\n  if [[ $? -ne 0 || $(ls $TMP_DIR | wc -w) -eq 0 ]]; then\n    cat $ERR_LOG_FILE\n    log_backup_error_exit \"Backup of the ${DB_NAME} database failed and needs attention.\"\n  fi\n\n  log INFO \"${DB_NAME}_backup\" \"Databases dumped successfully. Creating tarball...\"\n\n  NOW=$(date +\"%Y-%m-%dT%H:%M:%SZ\")\n  if [[ -z \"${BACK_UP_MODE}\" ]]; then\n    TARBALL_FILE=\"${DB_NAME}.${DB_NAMESPACE}.${SCOPE}.${NOW}.tar.gz\"\n  else\n    TARBALL_FILE=\"${DB_NAME}.${DB_NAMESPACE}.${SCOPE}.${BACK_UP_MODE}.${NOW}.tar.gz\"\n  fi\n\n  cd $TMP_DIR || log_backup_error_exit \\\n    \"Backup of the ${DB_NAME} database failed. Cannot change to directory $TMP_DIR\"\n\n  #Archive the current database files\n  tar zcvf $ARCHIVE_DIR/$TARBALL_FILE *\n  if [[ $? -ne 0 ]]; then\n    log_backup_error_exit \\\n      \"Backup ${DB_NAME} to local file system failed. 
Backup tarball could not be created.\"\n  fi\n\n  # Get the size of the file\n  ARCHIVE_SIZE=$(ls -l $ARCHIVE_DIR/$TARBALL_FILE | awk '{print $5}')\n\n  log INFO \"${DB_NAME}_backup\" \"Tarball $TARBALL_FILE created successfully.\"\n\n  cd $ARCHIVE_DIR\n\n  #Only delete the old archive after a successful archive\n  export LOCAL_DAYS_TO_KEEP=$(echo $LOCAL_DAYS_TO_KEEP | sed 's/\"//g')\n  if [[ \"$LOCAL_DAYS_TO_KEEP\" -gt 0 ]]; then\n    get_backup_prefix $(ls -1 ${ARCHIVE_DIR}/*.gz)\n    for ((i=0; i<${#PREFIXES[@]}; i++)); do\n      echo \"Working with prefix: ${PREFIXES[i]}\"\n      create_hash_table $(ls -1 ${ARCHIVE_DIR}/${PREFIXES[i]}*.gz)\n      remove_old_local_archives\n    done\n  fi\n\n  # Local backup verification process\n\n  # It is expected that this function will verify the database backup files\n  if verify_databases_backup_archives ${SCOPE}; then\n    log INFO \"${DB_NAME}_backup_verify\" \"Databases backup verified successfully. Uploading verified backups to remote location...\"\n  else\n    # If successful, there should be at least one file in the TMP_DIR\n    if [[ $(ls $TMP_DIR | wc -w) -eq 0 ]]; then\n      cat $ERR_LOG_FILE\n    fi\n    log_verify_backup_exit \"Verify of the ${DB_NAME} database backup failed and needs attention.\"\n    exit 1\n  fi\n\n  # Remove the temporary directory and files as they are no longer needed.\n  rm -rf $TMP_DIR\n  rm -f $ERR_LOG_FILE\n\n  # Remote backup\n  REMOTE_BACKUP=$(echo $REMOTE_BACKUP_ENABLED | sed 's/\"//g')\n  if $REMOTE_BACKUP; then\n    # Remove Quotes from the constants which were added due to reading\n    # from secret.\n    export REMOTE_BACKUP_RETRIES=$(echo $REMOTE_BACKUP_RETRIES | sed 's/\"//g')\n    export MIN_DELAY_SEND_REMOTE=$(echo $MIN_DELAY_SEND_REMOTE | sed 's/\"//g')\n    export MAX_DELAY_SEND_REMOTE=$(echo $MAX_DELAY_SEND_REMOTE | sed 's/\"//g')\n    export REMOTE_DAYS_TO_KEEP=$(echo $REMOTE_DAYS_TO_KEEP | sed 's/\"//g')\n\n    store_backup_remotely $ARCHIVE_DIR $TARBALL_FILE\n    if 
[[ $? -ne 0 ]]; then\n      # This error should print first, then print the summary as the last\n      # thing that the user sees in the output.\n      log ERROR \"${DB_NAME}_backup\" \"Backup ${TARBALL_FILE} could not be sent to remote RGW.\"\n      echo \"==================================================================\"\n      echo \"Local backup successful, but could not send to remote RGW.\"\n      echo \"Backup archive name: $TARBALL_FILE\"\n      echo \"Backup archive size: $ARCHIVE_SIZE\"\n      echo \"==================================================================\"\n      # Because the local backup was successful, exit with 0 so the pod will not\n      # continue to restart and fill the disk with more backups. The ERRORs are\n      # logged and alerting system should catch those errors and flag the operator.\n      exit 0\n    fi\n\n    #Only delete the old archive after a successful archive\n    if [[ \"$REMOTE_DAYS_TO_KEEP\" -gt 0 ]]; then\n      prepare_list_of_remote_backups\n      get_backup_prefix $(cat $DB_BACKUP_FILES)\n      for ((i=0; i<${#PREFIXES[@]}; i++)); do\n        echo \"Working with prefix: ${PREFIXES[i]}\"\n        create_hash_table $(cat ${DB_BACKUP_FILES} | grep ${PREFIXES[i]})\n        remove_old_remote_archives\n      done\n    fi\n\n    echo \"==================================================================\"\n    echo \"Local backup and backup to remote RGW successful!\"\n    echo \"Backup archive name: $TARBALL_FILE\"\n    echo \"Backup archive size: $ARCHIVE_SIZE\"\n    echo \"==================================================================\"\n  else\n    # Remote backup is not enabled. 
This is ok; at least we have a local backup.\n    log INFO \"${DB_NAME}_backup\" \"Skipping remote backup, as it is not enabled.\"\n\n    echo \"==================================================================\"\n    echo \"Local backup successful!\"\n    echo \"Backup archive name: $TARBALL_FILE\"\n    echo \"Backup archive size: $ARCHIVE_SIZE\"\n    echo \"==================================================================\"\n  fi\n}\n{{- end }}"
  },
  {
    "path": "helm-toolkit/templates/scripts/db-backup-restore/_restore_main.sh.tpl",
    "content": "{{- define \"helm-toolkit.scripts.db-backup-restore.restore_main\" }}\n#!/bin/bash\n\n# This file contains a database restore framework which database scripts\n# can use to perform a backup. The idea here is that the database-specific\n# functions will be implemented by the various databases using this script\n# (like mariadb, postgresql or etcd for example). The database-specific\n# script will need to first \"source\" this file like this:\n#   source /tmp/restore_main.sh\n#\n# Then the script should call the main CLI function (cli_main):\n#   cli_main <arg_list>\n#       where:\n#         <arg_list>    is the list of arguments given by the user\n#\n#       The framework will require the following variables to be exported:\n#\n#         export DB_NAMESPACE        Namespace where the database(s) reside\n#         export DB_NAME             Name of the database system\n#         export ARCHIVE_DIR         Location where the backup tarballs should\n#                                    be stored. 
(full directory path which\n#                                    should already exist)\n#         export CONTAINER_NAME      Name of the container on the RGW where\n#                                    the backups are stored.\n#         RGW access variables:\n#           export OS_REGION_NAME          Name of the region the RGW resides in\n#           export OS_AUTH_URL             Keystone URL associated with the RGW\n#           export OS_PROJECT_NAME         Name of the project associated with the\n#                                          keystone user\n#           export OS_USERNAME             Name of the keystone user\n#           export OS_PASSWORD             Password of the keystone user\n#           export OS_USER_DOMAIN_NAME     Keystone domain the project belongs to\n#           export OS_PROJECT_DOMAIN_NAME  Keystone domain the user belongs to\n#           export OS_IDENTITY_API_VERSION Keystone API version to use\n#\n# The database-specific functions that need to be implemented are:\n#   get_databases\n#       where:\n#         <tmp_dir>     is the full directory path where the decompressed\n#                       database files reside\n#         <db_file>     is the full path of the file to write the database\n#                       names into, one database per line\n#       returns: 0 if no errors; 1 if any errors occurred\n#\n#       This function is expected to extract the database names from the\n#       uncompressed database files found in the given \"tmp_dir\", which is\n#       the staging directory for database restore. 
The database names\n#       should be written to the given \"db_file\", one database name per\n#       line.\n#\n#   get_tables\n#         <db_name>     is the name of the database to get the tables from\n#         <tmp_dir>     is the full directory path where the decompressed\n#                       database files reside\n#         <table_file>  is the full path of the file to write the table\n#                       names into, one table per line\n#       returns: 0 if no errors; 1 if any errors occurred\n#\n#       This function is expected to extract the table names from the given\n#       database, found in the uncompressed database files located in the\n#       given \"tmp_dir\", which is the staging directory for database restore.\n#       The table names should be written to the given \"table_file\", one\n#       table name per line.\n#\n#   get_rows\n#         <table_name>  is the name of the table to get the rows from\n#         <db_name>     is the name of the database the table resides in\n#         <tmp_dir>     is the full directory path where the decompressed\n#                       database files reside\n#         <rows_file>   is the full path of the file to write the table\n#                       row data into, one row (INSERT statement) per line\n#       returns: 0 if no errors; 1 if any errors occurred\n#\n#       This function is expected to extract the rows from the given table\n#       in the given database, found in the uncompressed database files\n#       located in the given \"tmp_dir\", which is the staging directory for\n#       database restore. 
The table rows should be written to the given\n#       \"rows_file\", one row (INSERT statement) per line.\n#\n#   get_schema\n#         <table_name>  is the name of the table to get the schema from\n#         <db_name>     is the name of the database the table resides in\n#         <tmp_dir>     is the full directory path where the decompressed\n#                       database files reside\n#         <schema_file> is the full path of the file to write the table\n#                       schema data into\n#       returns: 0 if no errors; 1 if any errors occurred\n#\n#       This function is expected to extract the schema from the given table\n#       in the given database, found in the uncompressed database files\n#       located in the given \"tmp_dir\", which is the staging directory for\n#       database restore. The table schema and related alterations and\n#       grant information should be written to the given \"schema_file\".\n#\n#   restore_single_db\n#       where:\n#         <db_name>     is the name of the database to be restored\n#         <tmp_dir>     is the full directory path where the decompressed\n#                       database files reside\n#       returns: 0 if no errors; 1 if any errors occurred\n#\n#       This function is expected to restore the database given as \"db_name\"\n#       using the database files located in the \"tmp_dir\". The framework\n#       will delete the \"tmp_dir\" and the files in it after the restore is\n#       complete.\n#\n#   restore_all_dbs\n#       where:\n#         <tmp_dir>     is the full directory path where the decompressed\n#                       database files reside\n#       returns: 0 if no errors; 1 if any errors occurred\n#\n#       This function is expected to restore all of the databases which\n#       are backed up in the database files located in the \"tmp_dir\". 
The\n#       framework will delete the \"tmp_dir\" and the files in it after the\n#       restore is complete.\n#\n# The functions in this file will take care of:\n#   1) The CLI parameter parsing for the arguments passed in by the user.\n#   2) The listing of either local or remote archive files at the request\n#      of the user.\n#   3) The retrieval/download of an archive file located either in the local\n#      file system or remotely stored on an RGW.\n#   4) Calling either \"restore_single_db\" or \"restore_all_dbs\" when the user\n#      chooses to restore a database or all databases.\n#   5) The framework will call \"get_databases\" when it needs a list of\n#      databases when the user requests a database list or when the user\n#      requests to restore a single database (to ensure it exists in the\n#      archive). Similarly, the framework will call \"get_tables\", \"get_rows\",\n#      or \"get_schema\" when it needs that data requested by the user.\n#\n\nusage() {\n  ret_val=$1\n  echo \"Usage:\"\n  echo \"Restore command options\"\n  echo \"=============================\"\n  echo \"help\"\n  echo \"list_archives [remote]\"\n  echo \"list_databases <archive_filename> [remote]\"\n  echo \"list_tables <archive_filename> <dbname> [remote]\"\n  echo \"list_rows <archive_filename> <dbname> <table_name> [remote]\"\n  echo \"list_schema <archive_filename> <dbname> <table_name> [remote]\"\n  echo \"restore <archive_filename> <db_specifier> [remote]\"\n  echo \"        where <db_specifier> = <dbname> | ALL\"\n  echo \"delete_archive <archive_filename> [remote]\"\n  clean_and_exit $ret_val \"\"\n}\n\nlog() {\n  #Log message to a file or stdout\n  #TODO: This can be convert into mail alert of alert send to a monitoring system\n  #Params: $1 log level\n  #Params: $2 service\n  #Params: $3 message\n  #Params: $4 Destination\n  LEVEL=$1\n  SERVICE=$2\n  MSG=$3\n  DEST=$4\n  DATE=$(date +\"%m-%d-%y %H:%M:%S\")\n  if [[ -z \"$DEST\" ]]; then\n    echo \"${DATE} 
${LEVEL}: $(hostname) ${SERVICE}: ${MSG}\"\n  else\n    echo \"${DATE} ${LEVEL}: $(hostname) ${SERVICE}: ${MSG}\" >>$DEST\n  fi\n}\n\n#Exit cleanly with some message and return code\nclean_and_exit() {\n  RETCODE=$1\n  MSG=$2\n\n  # Clean/remove temporary directories/files\n  rm -rf $TMP_DIR\n  rm -f $RESULT_FILE\n\n  if [[ \"x${MSG}\" != \"x\" ]]; then\n    echo $MSG\n  fi\n  exit $RETCODE\n}\n\ndetermine_resulting_error_code() {\n  RESULT=\"$1\"\n\n  echo ${RESULT} | grep \"HTTP 404\"\n  if [[ $? -eq 0 ]]; then\n    log ERROR \"${DB_NAME}_restore\" \"Could not find the archive: ${RESULT}\"\n    return 1\n  else\n    echo ${RESULT} | grep \"HTTP 401\"\n    if [[ $? -eq 0 ]]; then\n      log ERROR \"${DB_NAME}_restore\" \"Could not access the archive: ${RESULT}\"\n      return 1\n    else\n      echo ${RESULT} | grep \"HTTP 503\"\n      if [[ $? -eq 0 ]]; then\n        log WARN \"${DB_NAME}_restore\" \"RGW service is unavailable. ${RESULT}\"\n        # In this case, the RGW may be temporarily down.\n        # Return slightly different error code so the calling code can retry\n        return 2\n      else\n        echo ${RESULT} | grep \"ConnectionError\"\n        if [[ $? 
-eq 0 ]]; then\n          log WARN \"${DB_NAME}_restore\" \"Could not reach the RGW: ${RESULT}\"\n          # In this case, keystone or the site/node may be temporarily down.\n          # Return slightly different error code so the calling code can retry\n          return 2\n        else\n          log ERROR \"${DB_NAME}_restore\" \"Archive ${ARCHIVE} could not be retrieved: ${RESULT}\"\n          return 1\n        fi\n      fi\n    fi\n  fi\n  return 0\n}\n\n# Retrieve a list of archives from the RGW.\nfunction retrieve_remote_listing() {\n  # List archives from PRIMARY RGW\n  log INFO \"${DB_NAME}_restore\" \"Listing archives from PRIMARY RGW...\"\n  list_archives_from_rgw \"PRIMARY\"\n  local primary_result=$?\n\n  # Check if failover environment variables are defined\n  if [[ -n \"${OS_AUTH_URL_FAILOVER}\" ]] && \\\n    [[ -n \"${OS_REGION_NAME_FAILOVER}\" ]] && \\\n    [[ -n \"${OS_INTERFACE_FAILOVER}\" ]] && \\\n    [[ -n \"${OS_PROJECT_DOMAIN_NAME_FAILOVER}\" ]] && \\\n    [[ -n \"${OS_PROJECT_NAME_FAILOVER}\" ]] && \\\n    [[ -n \"${OS_USER_DOMAIN_NAME_FAILOVER}\" ]] && \\\n    [[ -n \"${OS_USERNAME_FAILOVER}\" ]] && \\\n    [[ -n \"${OS_PASSWORD_FAILOVER}\" ]]; then\n    # Redefine OS_* variables with OS_*_FAILOVER ones\n    log INFO \"${DB_NAME}_restore\" \"Listing archives from FAILOVER RGW...\"\n\n    # Saving original OS_* variables as OS_*_PRIMARY\n    export OS_AUTH_URL_PRIMARY=${OS_AUTH_URL}\n    export OS_REGION_NAME_PRIMARY=${OS_REGION_NAME}\n    export OS_INTERFACE_PRIMARY=${OS_INTERFACE}\n    export OS_PROJECT_DOMAIN_NAME_PRIMARY=${OS_PROJECT_DOMAIN_NAME}\n    export OS_PROJECT_NAME_PRIMARY=${OS_PROJECT_NAME}\n    export OS_USER_DOMAIN_NAME_PRIMARY=${OS_USER_DOMAIN_NAME}\n    export OS_USERNAME_PRIMARY=${OS_USERNAME}\n    export OS_PASSWORD_PRIMARY=${OS_PASSWORD}\n    export OS_DEFAULT_DOMAIN_PRIMARY=${OS_DEFAULT_DOMAIN}\n\n    export OS_AUTH_URL=${OS_AUTH_URL_FAILOVER}\n    export OS_REGION_NAME=${OS_REGION_NAME_FAILOVER}\n    export 
OS_INTERFACE=${OS_INTERFACE_FAILOVER}\n    export OS_PROJECT_DOMAIN_NAME=${OS_PROJECT_DOMAIN_NAME_FAILOVER}\n    export OS_PROJECT_NAME=${OS_PROJECT_NAME_FAILOVER}\n    export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME_FAILOVER}\n    export OS_USERNAME=${OS_USERNAME_FAILOVER}\n    export OS_PASSWORD=${OS_PASSWORD_FAILOVER}\n    export OS_DEFAULT_DOMAIN=${OS_DEFAULT_DOMAIN_FAILOVER}\n\n    list_archives_from_rgw \"FAILOVER\"\n    local failover_result=$?\n\n    # Restore original OS_* variables from OS_*_PRIMARY\n    export OS_AUTH_URL=${OS_AUTH_URL_PRIMARY}\n    export OS_REGION_NAME=${OS_REGION_NAME_PRIMARY}\n    export OS_INTERFACE=${OS_INTERFACE_PRIMARY}\n    export OS_PROJECT_DOMAIN_NAME=${OS_PROJECT_DOMAIN_NAME_PRIMARY}\n    export OS_PROJECT_NAME=${OS_PROJECT_NAME_PRIMARY}\n    export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME_PRIMARY}\n    export OS_USERNAME=${OS_USERNAME_PRIMARY}\n    export OS_PASSWORD=${OS_PASSWORD_PRIMARY}\n    export OS_DEFAULT_DOMAIN=${OS_DEFAULT_DOMAIN_PRIMARY}\n\n    # Return success if either primary or failover listing was successful\n    if [[ $primary_result -eq 0 || $failover_result -eq 0 ]]; then\n      return 0\n    else\n      return 1\n    fi\n  else\n    # Return the result of the primary listing if failover variables are not defined\n    return $primary_result\n  fi\n}\n\nfunction list_archives_from_rgw() {\n  local prefix=$1\n  local RESULT\n\n  log INFO \"${DB_NAME}_restore\" \"Obtaining list of archives from ${prefix} RGW...\"\n  RESULT=$(openstack container show $CONTAINER_NAME 2>&1)\n  if [[ $? -eq 0 ]]; then\n    openstack object list $CONTAINER_NAME | grep $DB_NAME | grep $DB_NAMESPACE | awk -v prefix=\"$prefix\" '{print prefix \":\" $2}' >> $TMP_DIR/archive_list\n    if [[ $? 
-ne 0 ]]; then\n      log ERROR \"${DB_NAME}_restore\" \"Container object listing could not be obtained from ${prefix} RGW.\"\n      return 1\n    else\n      log INFO \"${DB_NAME}_restore\" \"Archive listing successfully retrieved from ${prefix} RGW.\"\n    fi\n  else\n    log ERROR \"${DB_NAME}_restore\" \"Failed to obtain container show from ${prefix} RGW: ${RESULT}\"\n    return 1\n  fi\n\n  return 0\n}\n\n# Retrieve a single archive from the RGW.\nretrieve_remote_archive() {\n  local archive=$1\n  local prefix=$(echo $archive | awk -F: '{print $1}')\n  local filename\n\n  if [[ $prefix == \"PRIMARY\" || $prefix == \"FAILOVER\" ]]; then\n    filename=$(echo $archive | awk -F: '{print $2\":\"$3\":\"$4}')\n  else\n    filename=$archive\n  fi\n\n  if [[ $prefix == \"PRIMARY\" ]]; then\n    log INFO \"${DB_NAME}_restore\" \"Retrieving archive ${filename} from PRIMARY RGW...\"\n    retrieve_archive_from_rgw $filename\n    return $?\n  elif [[ $prefix == \"FAILOVER\" ]]; then\n    log INFO \"${DB_NAME}_restore\" \"Retrieving archive ${filename} from FAILOVER RGW...\"\n\n    # Saving original OS_* variables as OS_*_PRIMARY\n    export OS_AUTH_URL_PRIMARY=${OS_AUTH_URL}\n    export OS_REGION_NAME_PRIMARY=${OS_REGION_NAME}\n    export OS_INTERFACE_PRIMARY=${OS_INTERFACE}\n    export OS_PROJECT_DOMAIN_NAME_PRIMARY=${OS_PROJECT_DOMAIN_NAME}\n    export OS_PROJECT_NAME_PRIMARY=${OS_PROJECT_NAME}\n    export OS_USER_DOMAIN_NAME_PRIMARY=${OS_USER_DOMAIN_NAME}\n    export OS_USERNAME_PRIMARY=${OS_USERNAME}\n    export OS_PASSWORD_PRIMARY=${OS_PASSWORD}\n    export OS_DEFAULT_DOMAIN_PRIMARY=${OS_DEFAULT_DOMAIN}\n\n    # Redefine OS_* variables with OS_*_FAILOVER ones\n    export OS_AUTH_URL=${OS_AUTH_URL_FAILOVER}\n    export OS_REGION_NAME=${OS_REGION_NAME_FAILOVER}\n    export 
OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME_FAILOVER}\n    export OS_USERNAME=${OS_USERNAME_FAILOVER}\n    export OS_PASSWORD=${OS_PASSWORD_FAILOVER}\n    export OS_DEFAULT_DOMAIN=${OS_DEFAULT_DOMAIN_FAILOVER}\n\n    retrieve_archive_from_rgw $filename\n    local result=$?\n\n    # Restore original OS_* variables from OS_*_PRIMARY\n    export OS_AUTH_URL=${OS_AUTH_URL_PRIMARY}\n    export OS_REGION_NAME=${OS_REGION_NAME_PRIMARY}\n    export OS_INTERFACE=${OS_INTERFACE_PRIMARY}\n    export OS_PROJECT_DOMAIN_NAME=${OS_PROJECT_DOMAIN_NAME_PRIMARY}\n    export OS_PROJECT_NAME=${OS_PROJECT_NAME_PRIMARY}\n    export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME_PRIMARY}\n    export OS_USERNAME=${OS_USERNAME_PRIMARY}\n    export OS_PASSWORD=${OS_PASSWORD_PRIMARY}\n    export OS_DEFAULT_DOMAIN=${OS_DEFAULT_DOMAIN_PRIMARY}\n\n    return $result\n  else\n    log ERROR \"${DB_NAME}_restore\" \"Invalid prefix ${prefix} for archive ${archive}.\"\n    return 1\n  fi\n}\n\n# Function to retrieve an archive from RGW\nretrieve_archive_from_rgw() {\n  local filename=$1\n  local RESULT\n\n  log INFO \"${DB_NAME}_restore\" \"Obtaining archive ${filename} from RGW...\"\n  RESULT=$(openstack object save --file $TMP_DIR/${filename} $CONTAINER_NAME ${filename}  2>&1)\n  if [[ $? 
-eq 0 ]]; then\n    log INFO \"${DB_NAME}_restore\" \"Archive ${filename} successfully retrieved.\"\n    return 0\n  else\n    log ERROR \"${DB_NAME}_restore\" \"Failed to retrieve archive ${filename}.\"\n    determine_resulting_error_code \"${RESULT}\"\n    return $?\n  fi\n}\n\n# Delete an archive from the RGW.\n# Delete a single archive from the RGW.\ndelete_remote_archive() {\n  local archive=$1\n  local prefix=$(echo $archive | awk -F: '{print $1}')\n  local filename\n\n  if [[ $prefix == \"PRIMARY\" || $prefix == \"FAILOVER\" ]]; then\n    filename=$(echo $archive | awk -F: '{print $2\":\"$3\":\"$4}')\n  else\n    filename=$archive\n  fi\n\n  if [[ $prefix == \"PRIMARY\" ]]; then\n    log INFO \"${DB_NAME}_restore\" \"Deleting archive ${filename} from PRIMARY RGW...\"\n    delete_archive_from_rgw $filename\n    return $?\n  elif [[ $prefix == \"FAILOVER\" ]]; then\n    log INFO \"${DB_NAME}_restore\" \"Deleting archive ${filename} from FAILOVER RGW...\"\n\n    # Saving original OS_* variables as OS_*_PRIMARY\n    export OS_AUTH_URL_PRIMARY=${OS_AUTH_URL}\n    export OS_REGION_NAME_PRIMARY=${OS_REGION_NAME}\n    export OS_INTERFACE_PRIMARY=${OS_INTERFACE}\n    export OS_PROJECT_DOMAIN_NAME_PRIMARY=${OS_PROJECT_DOMAIN_NAME}\n    export OS_PROJECT_NAME_PRIMARY=${OS_PROJECT_NAME}\n    export OS_USER_DOMAIN_NAME_PRIMARY=${OS_USER_DOMAIN_NAME}\n    export OS_USERNAME_PRIMARY=${OS_USERNAME}\n    export OS_PASSWORD_PRIMARY=${OS_PASSWORD}\n    export OS_DEFAULT_DOMAIN_PRIMARY=${OS_DEFAULT_DOMAIN}\n\n    # Redefine OS_* variables with OS_*_FAILOVER ones\n    export OS_AUTH_URL=${OS_AUTH_URL_FAILOVER}\n    export OS_REGION_NAME=${OS_REGION_NAME_FAILOVER}\n    export OS_INTERFACE=${OS_INTERFACE_FAILOVER}\n    export OS_PROJECT_DOMAIN_NAME=${OS_PROJECT_DOMAIN_NAME_FAILOVER}\n    export OS_PROJECT_NAME=${OS_PROJECT_NAME_FAILOVER}\n    export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME_FAILOVER}\n    export OS_USERNAME=${OS_USERNAME_FAILOVER}\n    export 
OS_PASSWORD=${OS_PASSWORD_FAILOVER}\n    export OS_DEFAULT_DOMAIN=${OS_DEFAULT_DOMAIN_FAILOVER}\n\n    delete_archive_from_rgw $filename\n    local result=$?\n\n    # Restore original OS_* variables from OS_*_PRIMARY\n    export OS_AUTH_URL=${OS_AUTH_URL_PRIMARY}\n    export OS_REGION_NAME=${OS_REGION_NAME_PRIMARY}\n    export OS_INTERFACE=${OS_INTERFACE_PRIMARY}\n    export OS_PROJECT_DOMAIN_NAME=${OS_PROJECT_DOMAIN_NAME_PRIMARY}\n    export OS_PROJECT_NAME=${OS_PROJECT_NAME_PRIMARY}\n    export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME_PRIMARY}\n    export OS_USERNAME=${OS_USERNAME_PRIMARY}\n    export OS_PASSWORD=${OS_PASSWORD_PRIMARY}\n    export OS_DEFAULT_DOMAIN=${OS_DEFAULT_DOMAIN_PRIMARY}\n\n    return $result\n  else\n    log ERROR \"${DB_NAME}_restore\" \"Invalid prefix ${prefix} for archive ${archive}.\"\n    return 1\n  fi\n}\n\n# Function to delete an archive from RGW\ndelete_archive_from_rgw() {\n  local filename=$1\n  local RESULT\n\n  RESULT=$(openstack object delete $CONTAINER_NAME $filename 2>&1)\n  if [[ $? -eq 0 ]]; then\n    log INFO \"${DB_NAME}_restore\" \"Archive ${filename} successfully deleted.\"\n    return 0\n  else\n    log ERROR \"${DB_NAME}_restore\" \"Failed to delete archive ${filename}.\"\n    determine_resulting_error_code \"${RESULT}\"\n    return $?\n  fi\n}\n\n# Display all archives\nlist_archives() {\n  REMOTE=$1\n\n  if [[ \"x${REMOTE^^}\" == \"xREMOTE\" ]]; then\n    retrieve_remote_listing\n    if [[ $? 
-eq 0 && -e $TMP_DIR/archive_list ]]; then\n      echo\n      echo \"All Archives from RGW Data Store\"\n      echo \"==============================================\"\n      cat $TMP_DIR/archive_list | sort\n      clean_and_exit 0 \"\"\n    else\n      clean_and_exit 1 \"ERROR: Archives could not be retrieved from the RGW.\"\n    fi\n  elif [[ \"x${REMOTE}\" == \"x\" ]]; then\n    if [[ -d $ARCHIVE_DIR ]]; then\n      archives=$(find $ARCHIVE_DIR/ -iname \"*.gz\" -print | sort)\n      echo\n      echo \"All Local Archives\"\n      echo \"==============================================\"\n      for archive in $archives\n      do\n        echo $archive | cut -d '/' -f8-\n      done\n      clean_and_exit 0 \"\"\n    else\n      clean_and_exit 1 \"ERROR: Local archive directory is not available.\"\n    fi\n  else\n    usage 1\n  fi\n}\n\n# Retrieve the archive from the desired location and decompress it into\n# the restore directory\nget_archive() {\n  local archive=$1\n  local prefix=$(echo $archive | awk -F: '{print $1}')\n  local filename\n\n  if [[ $prefix == \"PRIMARY\" || $prefix == \"FAILOVER\" ]]; then\n    filename=$(echo $archive | awk -F: '{print $2\":\"$3\":\"$4}')\n  else\n    filename=$archive\n  fi\n  REMOTE=$2\n\n  if [[ \"x$REMOTE\" == \"xremote\" ]]; then\n    log INFO \"${DB_NAME}_restore\" \"Retrieving archive ${prefix}:${filename} from the remote RGW...\"\n    retrieve_remote_archive ${prefix}:${filename}\n    if [[ $? -ne 0 ]]; then\n      clean_and_exit 1 \"ERROR: Could not retrieve remote archive: ${prefix}:${filename}\"\n    fi\n  elif [[ \"x$REMOTE\" == \"x\" ]]; then\n    if [[ -e $ARCHIVE_DIR/$filename ]]; then\n      cp $ARCHIVE_DIR/$filename $TMP_DIR/$filename\n      if [[ $? 
-ne 0 ]]; then\n        clean_and_exit 1 \"ERROR: Could not copy local archive to restore directory.\"\n      fi\n    else\n      clean_and_exit 1 \"ERROR: Local archive file could not be found.\"\n    fi\n  else\n    usage 1\n  fi\n\n  log INFO \"${DB_NAME}_restore\" \"Decompressing archive $filename...\"\n\n  cd $TMP_DIR\n  tar zxvf - < $TMP_DIR/$filename 1>/dev/null\n  if [[ $? -ne 0 ]]; then\n    clean_and_exit 1 \"ERROR: Archive decompression failed.\"\n  fi\n}\n\n# Display all databases from an archive\nlist_databases() {\n  ARCHIVE_FILE=$1\n  REMOTE=$2\n  WHERE=\"local\"\n\n  if [[ -n ${REMOTE} ]]; then\n    WHERE=\"remote\"\n  fi\n\n  # Get the archive from the source location (local/remote)\n  get_archive $ARCHIVE_FILE $REMOTE\n\n  # Expectation is that the database listing will be put into\n  # the given file one database per line\n  get_databases $TMP_DIR $RESULT_FILE\n  if [[ \"$?\" -ne 0 ]]; then\n    clean_and_exit 1 \"ERROR: Could not retrieve databases from $WHERE archive $ARCHIVE_FILE.\"\n  fi\n\n  if [[ -f \"$RESULT_FILE\" ]]; then\n    echo \" \"\n    echo \"Databases in the $WHERE archive $ARCHIVE_FILE\"\n    echo \"================================================================================\"\n    cat $RESULT_FILE\n  else\n    clean_and_exit 1 \"ERROR: Databases file missing. 
Could not list databases from $WHERE archive $ARCHIVE_FILE.\"\n  fi\n}\n\n# Display all tables of a database from an archive\nlist_tables() {\n  ARCHIVE_FILE=$1\n  DATABASE=$2\n  REMOTE=$3\n  WHERE=\"local\"\n\n  if [[ -n ${REMOTE} ]]; then\n    WHERE=\"remote\"\n  fi\n\n  # Get the archive from the source location (local/remote)\n  get_archive $ARCHIVE_FILE $REMOTE\n\n  # Expectation is that the database listing will be put into\n  # the given file one table per line\n  get_tables $DATABASE $TMP_DIR $RESULT_FILE\n  if [[ \"$?\" -ne 0 ]]; then\n    clean_and_exit 1 \"ERROR: Could not retrieve tables for database ${DATABASE} from $WHERE archive $ARCHIVE_FILE.\"\n  fi\n\n  if [[ -f \"$RESULT_FILE\" ]]; then\n    echo \" \"\n    echo \"Tables in database $DATABASE from $WHERE archive $ARCHIVE_FILE\"\n    echo \"================================================================================\"\n    cat $RESULT_FILE\n  else\n    clean_and_exit 1 \"ERROR: Tables file missing. Could not list tables of database ${DATABASE} from $WHERE archive $ARCHIVE_FILE.\"\n  fi\n}\n\n# Display all rows of the given database table from an archive\nlist_rows() {\n  ARCHIVE_FILE=$1\n  DATABASE=$2\n  TABLE=$3\n  REMOTE=$4\n  WHERE=\"local\"\n\n  if [[ -n ${REMOTE} ]]; then\n    WHERE=\"remote\"\n  fi\n\n  # Get the archive from the source location (local/remote)\n  get_archive $ARCHIVE_FILE $REMOTE\n\n  # Expectation is that the database listing will be put into\n  # the given file one table per line\n  get_rows $DATABASE $TABLE $TMP_DIR $RESULT_FILE\n  if [[ \"$?\" -ne 0 ]]; then\n    clean_and_exit 1 \"ERROR: Could not retrieve rows in table ${TABLE} of database ${DATABASE} from $WHERE archive $ARCHIVE_FILE.\"\n  fi\n\n  if [[ -f \"$RESULT_FILE\" ]]; then\n    echo \" \"\n    echo \"Rows in table $TABLE of database $DATABASE from $WHERE archive $ARCHIVE_FILE\"\n    echo \"================================================================================\"\n    cat $RESULT_FILE\n  else\n    
clean_and_exit 1 \"ERROR: Rows file missing. Could not list rows in table ${TABLE} of database ${DATABASE} from $WHERE archive $ARCHIVE_FILE.\"\n  fi\n}\n\n# Display the schema information of the given database table from an archive\nlist_schema() {\n  ARCHIVE_FILE=$1\n  DATABASE=$2\n  TABLE=$3\n  REMOTE=$4\n  WHERE=\"local\"\n\n  if [[ -n ${REMOTE} ]]; then\n    WHERE=\"remote\"\n  fi\n\n  # Get the archive from the source location (local/remote)\n  get_archive $ARCHIVE_FILE $REMOTE\n\n  # Expectation is that the schema information will be placed into\n  # the given schema file.\n  get_schema $DATABASE $TABLE $TMP_DIR $RESULT_FILE\n  if [[ \"$?\" -ne 0 ]]; then\n    clean_and_exit 1 \"ERROR: Could not retrieve schema for table ${TABLE} of database ${DATABASE} from $WHERE archive $ARCHIVE_FILE.\"\n  fi\n\n  if [[ -f \"$RESULT_FILE\" ]]; then\n    echo \" \"\n    echo \"Schema for table $TABLE of database $DATABASE from $WHERE archive $ARCHIVE_FILE\"\n    echo \"================================================================================\"\n    cat $RESULT_FILE\n  else\n    clean_and_exit 1 \"ERROR: Schema file missing. Could not list schema for table ${TABLE} of database ${DATABASE} from $WHERE archive $ARCHIVE_FILE.\"\n  fi\n}\n\n# Delete an archive\ndelete_archive() {\n  ARCHIVE_FILE=$1\n  REMOTE=$2\n  WHERE=\"local\"\n\n  if [[ -n ${REMOTE} ]]; then\n    WHERE=\"remote\"\n  fi\n\n  if [[ \"${WHERE}\" == \"remote\" ]]; then\n    delete_remote_archive ${ARCHIVE_FILE}\n    if [[ $? -ne 0 ]]; then\n      clean_and_exit 1 \"ERROR: Could not delete remote archive: ${ARCHIVE_FILE}\"\n    fi\n  else # Local\n    if [[ -e ${ARCHIVE_DIR}/${ARCHIVE_FILE} ]]; then\n      rm -f ${ARCHIVE_DIR}/${ARCHIVE_FILE}\n      if [[ $? 
-ne 0 ]]; then\n        clean_and_exit 1 \"ERROR: Could not delete local archive.\"\n      fi\n    else\n      clean_and_exit 1 \"ERROR: Local archive file could not be found.\"\n    fi\n  fi\n\n  log INFO \"${DB_NAME}_restore\" \"Successfully deleted archive ${ARCHIVE_FILE} from ${WHERE} storage.\"\n}\n\n\n# Return 1 if the given database exists in the database file. 0 otherwise.\ndatabase_exists() {\n  DB=$1\n\n  grep \"${DB}\" ${RESULT_FILE}\n  if [[ $? -eq 0 ]]; then\n    return 1\n  fi\n  return 0\n}\n\n# This is the main CLI interpreter function\ncli_main() {\n  ARGS=(\"$@\")\n\n  # Create the ARCHIVE DIR if it's not already there.\n  mkdir -p $ARCHIVE_DIR\n\n  # Create temp directory for a staging area to decompress files into\n  export TMP_DIR=$(mktemp -d)\n\n  # Create a temp file for storing list of databases (if needed)\n  export RESULT_FILE=$(mktemp -p /tmp)\n\n  case \"${ARGS[0]}\" in\n    \"help\")\n      usage 0\n      ;;\n\n    \"list_archives\")\n      if [[ ${#ARGS[@]} -gt 2 ]]; then\n        usage 1\n      elif [[ ${#ARGS[@]} -eq 1 ]]; then\n        list_archives\n      else\n        list_archives ${ARGS[1]}\n      fi\n      clean_and_exit 0\n      ;;\n\n    \"list_databases\")\n      if [[ ${#ARGS[@]} -lt 2 || ${#ARGS[@]} -gt 3 ]]; then\n        usage 1\n      elif [[ ${#ARGS[@]} -eq 2 ]]; then\n        list_databases ${ARGS[1]}\n      else\n        list_databases ${ARGS[1]} ${ARGS[2]}\n      fi\n      ;;\n\n    \"list_tables\")\n      if [[ ${#ARGS[@]} -lt 3 || ${#ARGS[@]} -gt 4 ]]; then\n        usage 1\n      elif [[ ${#ARGS[@]} -eq 3 ]]; then\n        list_tables ${ARGS[1]} ${ARGS[2]}\n      else\n        list_tables ${ARGS[1]} ${ARGS[2]} ${ARGS[3]}\n      fi\n      ;;\n\n    \"list_rows\")\n      if [[ ${#ARGS[@]} -lt 4 || ${#ARGS[@]} -gt 5 ]]; then\n        usage 1\n      elif [[ ${#ARGS[@]} -eq 4 ]]; then\n        list_rows ${ARGS[1]} ${ARGS[2]} ${ARGS[3]}\n      else\n        list_rows ${ARGS[1]} ${ARGS[2]} ${ARGS[3]} ${ARGS[4]}\n      
fi\n      ;;\n\n    \"list_schema\")\n      if [[ ${#ARGS[@]} -lt 4 || ${#ARGS[@]} -gt 5 ]]; then\n        usage 1\n      elif [[ ${#ARGS[@]} -eq 4 ]]; then\n        list_schema ${ARGS[1]} ${ARGS[2]} ${ARGS[3]}\n      else\n        list_schema ${ARGS[1]} ${ARGS[2]} ${ARGS[3]} ${ARGS[4]}\n      fi\n      ;;\n\n    \"restore\")\n      REMOTE=\"\"\n      if [[ ${#ARGS[@]} -lt 3 || ${#ARGS[@]} -gt 4 ]]; then\n        usage 1\n      elif [[ ${#ARGS[@]} -eq 4 ]]; then\n        REMOTE=${ARGS[3]}\n      fi\n\n      ARCHIVE=${ARGS[1]}\n      DB_SPEC=${ARGS[2]}\n\n      #Get all the databases in that archive\n      get_archive $ARCHIVE $REMOTE\n\n      if [[ \"$( echo $DB_SPEC | tr '[a-z]' '[A-Z]')\" != \"ALL\" ]]; then\n        # Expectation is that the database listing will be put into\n        # the given file one database per line\n        get_databases $TMP_DIR $RESULT_FILE\n        if [[ \"$?\" -ne 0 ]]; then\n          clean_and_exit 1 \"ERROR: Could not get the list of databases to restore.\"\n        fi\n\n        if [[ ! $DB_NAMESPACE == \"kube-system\" ]]; then\n          #check if the requested database is available in the archive\n          database_exists $DB_SPEC\n          if [[ $? -ne 1 ]]; then\n            clean_and_exit 1 \"ERROR: Database ${DB_SPEC} does not exist.\"\n          fi\n        fi\n\n        log INFO \"${DB_NAME}_restore\" \"Restoring Database $DB_SPEC And Grants\"\n        restore_single_db $DB_SPEC $TMP_DIR\n        if [[ \"$?\" -eq 0 ]]; then\n          log INFO \"${DB_NAME}_restore\" \"Single database restored successfully.\"\n        else\n          clean_and_exit 1 \"ERROR: Single database restore failed.\"\n        fi\n        clean_and_exit 0 \"\"\n      else\n        log INFO \"${DB_NAME}_restore\" \"Restoring All The Databases. 
This could take a few minutes...\"\n        restore_all_dbs $TMP_DIR\n        if [[ \"$?\" -eq 0 ]]; then\n          log INFO \"${DB_NAME}_restore\" \"All databases restored successfully.\"\n        else\n          clean_and_exit 1 \"ERROR: Database restore failed.\"\n        fi\n        clean_and_exit 0 \"\"\n      fi\n      ;;\n    \"delete_archive\")\n      if [[ ${#ARGS[@]} -lt 2 || ${#ARGS[@]} -gt 3 ]]; then\n        usage 1\n      elif [[ ${#ARGS[@]} -eq 2 ]]; then\n        delete_archive ${ARGS[1]}\n      else\n        delete_archive ${ARGS[1]} ${ARGS[2]}\n      fi\n      ;;\n    *)\n      usage 1\n      ;;\n  esac\n\n  clean_and_exit 0 \"\"\n}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_custom_job_annotations.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Adds custom annotations to the job spec of a component.\nexamples:\n  - values: |\n      annotations:\n        job:\n          default:\n            custom.tld/key: \"value\"\n            custom.tld/key2: \"value2\"\n          keystone_domain_manage:\n            another.tld/foo: \"bar\"\n    usage: |\n      {{ tuple \"keystone_domain_manage\" . | include \"helm-toolkit.snippets.custom_job_annotations\" }}\n    return: |\n      another.tld/foo: bar\n  - values: |\n      annotations:\n        job:\n          default:\n            custom.tld/key: \"value\"\n            custom.tld/key2: \"value2\"\n          keystone_domain_manage:\n            another.tld/foo: \"bar\"\n    usage: |\n      {{ tuple \"keystone_bootstrap\" . | include \"helm-toolkit.snippets.custom_job_annotations\" }}\n    return: |\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n  - values: |\n      annotations:\n        job:\n          default:\n            custom.tld/key: \"value\"\n            custom.tld/key2: \"value2\"\n          keystone_domain_manage:\n            another.tld/foo: \"bar\"\n          keystone_bootstrap:\n    usage: |\n      {{ tuple \"keystone_bootstrap\" . 
| include \"helm-toolkit.snippets.custom_job_annotations\" }}\n    return: |\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n*/}}\n\n{{- define \"helm-toolkit.snippets.custom_job_annotations\" -}}\n{{- $envAll := index . 1 -}}\n{{- $component := index . 0 | replace \"-\" \"_\" -}}\n{{- if (hasKey $envAll.Values \"annotations\") -}}\n{{- if (hasKey $envAll.Values.annotations \"job\") -}}\n{{- $annotationsMap := $envAll.Values.annotations.job -}}\n{{- $defaultAnnotations := dict -}}\n{{- if (hasKey $annotationsMap \"default\" ) -}}\n{{- $defaultAnnotations = $annotationsMap.default -}}\n{{- end -}}\n{{- $annotations := index $annotationsMap $component | default $defaultAnnotations -}}\n{{- if (not (empty $annotations)) -}}\n{{- toYaml $annotations -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_custom_pod_annotations.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Adds custom annotations to the pod spec of a component.\nexamples:\n  - values: |\n      annotations:\n        pod:\n          default:\n            custom.tld/key: \"value\"\n            custom.tld/key2: \"value2\"\n          nova_compute:\n            another.tld/foo: \"bar\"\n    usage: |\n      {{ tuple \"nova_compute\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" }}\n    return: |\n      another.tld/foo: bar\n  - values: |\n      annotations:\n        pod:\n          default:\n            custom.tld/key: \"value\"\n            custom.tld/key2: \"value2\"\n          nova_compute:\n            another.tld/foo: \"bar\"\n    usage: |\n      {{ tuple \"nova_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" }}\n    return: |\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n  - values: |\n      annotations:\n        pod:\n          default:\n            custom.tld/key: \"value\"\n            custom.tld/key2: \"value2\"\n          nova_compute:\n            another.tld/foo: \"bar\"\n          nova_api:\n    usage: |\n      {{ tuple \"nova_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" }}\n    return: |\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n*/}}\n\n{{- define \"helm-toolkit.snippets.custom_pod_annotations\" -}}\n{{- $component := index . 0 -}}\n{{- $envAll := index . 
1 -}}\n{{- if (hasKey $envAll.Values \"annotations\") -}}\n{{- if (hasKey $envAll.Values.annotations \"pod\") -}}\n{{- $annotationsMap := $envAll.Values.annotations.pod -}}\n{{- $defaultAnnotations := dict -}}\n{{- if (hasKey $annotationsMap \"default\" ) -}}\n{{- $defaultAnnotations = $annotationsMap.default -}}\n{{- end -}}\n{{- $annotations := index $annotationsMap $component | default $defaultAnnotations -}}\n{{- if (not (empty $annotations)) -}}\n{{- toYaml $annotations -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_custom_secret_annotations.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Adds custom annotations to the secret spec of a component.\nexamples:\n  - values: |\n      annotations:\n        secret:\n          default:\n            custom.tld/key: \"value\"\n            custom.tld/key2: \"value2\"\n          identity:\n            admin:\n              another.tld/foo: \"bar\"\n    usage: |\n      {{ tuple \"identity\" \"admin\" . | include \"helm-toolkit.snippets.custom_secret_annotations\" }}\n    return: |\n      another.tld/foo: bar\n  - values: |\n      annotations:\n        secret:\n          default:\n            custom.tld/key: \"value\"\n            custom.tld/key2: \"value2\"\n          identity:\n            admin:\n              another.tld/foo: \"bar\"\n    usage: |\n      {{ tuple \"oslo_db\" \"admin\" . | include \"helm-toolkit.snippets.custom_secret_annotations\" }}\n    return: |\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n  - values: |\n      annotations:\n        secret:\n          default:\n            custom.tld/key: \"value\"\n            custom.tld/key2: \"value2\"\n          identity:\n            admin:\n              another.tld/foo: \"bar\"\n          oslo_db:\n            admin:\n    usage: |\n      {{ tuple \"oslo_db\" \"admin\" . 
| include \"helm-toolkit.snippets.custom_secret_annotations\" }}\n    return: |\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n*/}}\n\n{{- define \"helm-toolkit.snippets.custom_secret_annotations\" -}}\n{{- $secretType := index . 0 -}}\n{{- $userClass := index . 1 | replace \"-\" \"_\" -}}\n{{- $envAll := index . 2 -}}\n{{- if (hasKey $envAll.Values \"annotations\") -}}\n{{- if (hasKey $envAll.Values.annotations \"secret\") -}}\n{{- $annotationsMap := index $envAll.Values.annotations.secret $secretType | default dict -}}\n{{- $defaultAnnotations := dict -}}\n{{- if (hasKey $envAll.Values.annotations.secret \"default\" ) -}}\n{{- $defaultAnnotations = $envAll.Values.annotations.secret.default -}}\n{{- end -}}\n{{- $annotations := index $annotationsMap $userClass | default $defaultAnnotations -}}\n{{- if (not (empty $annotations)) -}}\n{{- toYaml $annotations -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_image.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Resolves an image reference to a string, and its pull policy\nvalues: |\n  images:\n    tags:\n      test_image: docker.io/port/test:version-foo\n      image_foo: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal\n    pull_policy: IfNotPresent\n    local_registry:\n      active: true\n      exclude:\n        - image_foo\n  endpoints:\n    cluster_domain_suffix: cluster.local\n    local_image_registry:\n      name: docker-registry\n      namespace: docker-registry\n      hosts:\n        default: localhost\n        internal: docker-registry\n        node: localhost\n      host_fqdn_override:\n        default: null\n      port:\n        registry:\n          node: 5000\nusage: |\n  {{ tuple . \"test_image\" | include \"helm-toolkit.snippets.image\" }}\nreturn: |\n  image: \"localhost:5000/docker.io/port/test:version-foo\"\n  imagePullPolicy: IfNotPresent\n*/}}\n\n{{- define \"helm-toolkit.snippets.image\" -}}\n{{- $envAll := index . 0 -}}\n{{- $image := index . 
1 -}}\n{{- $imageTag := index $envAll.Values.images.tags $image -}}\n{{- if and ($envAll.Values.images.local_registry.active) (not (has $image $envAll.Values.images.local_registry.exclude )) -}}\n{{- $registryPrefix := printf \"%s:%s\" (tuple \"local_image_registry\" \"node\" $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\") (tuple \"local_image_registry\" \"node\" \"registry\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\") -}}\nimage: {{ printf \"%s/%s\" $registryPrefix $imageTag | quote }}\n{{- else -}}\nimage: {{ $imageTag | quote }}\n{{- end }}\nimagePullPolicy: {{ $envAll.Values.images.pull_policy }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_keystone_openrc_env_vars.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Returns a set of container environment variables, equivalent to an openrc for\n  use with keystone based command line clients.\nvalues: |\n  secrets:\n    identity:\n      admin: example-keystone-admin\nusage: |\n  {{ include \"helm-toolkit.snippets.keystone_openrc_env_vars\" ( dict \"ksUserSecret\" .Values.secrets.identity.admin ) }}\nreturn: |\n  - name: OS_IDENTITY_API_VERSION\n    value: \"3\"\n  - name: OS_AUTH_URL\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_AUTH_URL\n  - name: OS_REGION_NAME\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_REGION_NAME\n  - name: OS_INTERFACE\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_INTERFACE\n  - name: OS_ENDPOINT_TYPE\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_INTERFACE\n  - name: OS_PROJECT_DOMAIN_NAME\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_PROJECT_DOMAIN_NAME\n  - name: OS_PROJECT_NAME\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_PROJECT_NAME\n  - name: OS_USER_DOMAIN_NAME\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_USER_DOMAIN_NAME\n  - name: OS_USERNAME\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_USERNAME\n  - name: OS_PASSWORD\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_PASSWORD\n  - name: OS_CACERT\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_CACERT\n*/}}\n\n{{- define \"helm-toolkit.snippets.keystone_openrc_env_vars\" }}\n{{- $useCA := .useCA -}}\n{{- $ksUserSecret := .ksUserSecret }}\n- name: OS_IDENTITY_API_VERSION\n  value: \"3\"\n- name: OS_AUTH_URL\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_AUTH_URL\n- name: OS_REGION_NAME\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_REGION_NAME\n- name: OS_INTERFACE\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_INTERFACE\n- name: OS_ENDPOINT_TYPE\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_INTERFACE\n- name: OS_PROJECT_DOMAIN_NAME\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_PROJECT_DOMAIN_NAME\n- name: OS_PROJECT_NAME\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_PROJECT_NAME\n- name: OS_USER_DOMAIN_NAME\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_USER_DOMAIN_NAME\n- name: OS_USERNAME\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_USERNAME\n- name: OS_PASSWORD\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_PASSWORD\n- name: OS_DEFAULT_DOMAIN\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_DEFAULT_DOMAIN\n{{- if $useCA }}\n- name: OS_CACERT\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_CACERT\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_keystone_openrc_failover_env_vars.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Returns a set of container failover environment variables, equivalent to an openrc for\n  use with keystone based command line clients.\nvalues: |\n  secrets:\n    identity:\n      admin: example-keystone-admin\nusage: |\n  {{ include \"helm-toolkit.snippets.keystone_openrc_failover_env_vars\" ( dict \"ksUserSecret\" .Values.secrets.identity.admin ) }}\nreturn: |\n  - name: OS_AUTH_URL_FAILOVER\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_AUTH_URL_FAILOVER\n  - name: OS_REGION_NAME_FAILOVER\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_REGION_NAME_FAILOVER\n  - name: OS_INTERFACE_FAILOVER\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_INTERFACE_FAILOVER\n  - name: OS_PROJECT_DOMAIN_NAME_FAILOVER\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_PROJECT_DOMAIN_NAME_FAILOVER\n  - name: OS_PROJECT_NAME_FAILOVER\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_PROJECT_NAME_FAILOVER\n  - name: OS_USER_DOMAIN_NAME_FAILOVER\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_USER_DOMAIN_NAME_FAILOVER\n  - name: OS_USERNAME_FAILOVER\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_USERNAME_FAILOVER\n  - name: OS_PASSWORD_FAILOVER\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-admin\n        key: OS_PASSWORD_FAILOVER\n*/}}\n\n{{- define \"helm-toolkit.snippets.keystone_openrc_failover_env_vars\" }}\n{{- $useCA := .useCA -}}\n{{- $ksUserSecret := .ksUserSecret }}\n- name: OS_AUTH_URL_FAILOVER\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_AUTH_URL_FAILOVER\n- name: OS_REGION_NAME_FAILOVER\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_REGION_NAME_FAILOVER\n- name: OS_INTERFACE_FAILOVER\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_INTERFACE_FAILOVER\n- name: OS_PROJECT_DOMAIN_NAME_FAILOVER\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_PROJECT_DOMAIN_NAME_FAILOVER\n- name: OS_PROJECT_NAME_FAILOVER\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_PROJECT_NAME_FAILOVER\n- name: OS_USER_DOMAIN_NAME_FAILOVER\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_USER_DOMAIN_NAME_FAILOVER\n- name: OS_USERNAME_FAILOVER\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_USERNAME_FAILOVER\n- name: OS_PASSWORD_FAILOVER\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_PASSWORD_FAILOVER\n- name: OS_DEFAULT_DOMAIN_FAILOVER\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_DEFAULT_DOMAIN_FAILOVER\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_keystone_secret_openrc.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.snippets.keystone_secret_openrc\" }}\n{{- $userClass := index . 0 -}}\n{{- $identityEndpoint := index . 1 -}}\n{{- $context := index . 2 -}}\n{{- $userContext := index $context.Values.endpoints.identity.auth $userClass }}\nOS_AUTH_URL: {{ tuple \"identity\" $identityEndpoint \"api\" $context | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | b64enc }}\nOS_REGION_NAME: {{ $userContext.region_name | b64enc }}\nOS_INTERFACE: {{ $userContext.interface | default \"internal\" | b64enc }}\nOS_PROJECT_DOMAIN_NAME: {{ $userContext.project_domain_name | b64enc }}\nOS_PROJECT_NAME: {{ $userContext.project_name | b64enc }}\nOS_USER_DOMAIN_NAME: {{ $userContext.user_domain_name | b64enc }}\nOS_USERNAME: {{ $userContext.username | b64enc }}\nOS_PASSWORD: {{ $userContext.password | b64enc }}\nOS_DEFAULT_DOMAIN: {{ $userContext.default_domain_id | default \"default\" | b64enc }}\n{{- if $userContext.cacert }}\nOS_CACERT: {{ $userContext.cacert | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_keystone_user_create_env_vars.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Returns a set of container environment variables, for use with the keystone\n  user management jobs.\nvalues: |\n  secrets:\n    identity:\n      service_user: example-keystone-user\nusage: |\n  {{ include \"helm-toolkit.snippets.keystone_user_create_env_vars\" ( dict \"ksUserSecret\" .Values.secrets.identity.service_user \"useCA\" true ) }}\nreturn: |\n  - name: SERVICE_OS_REGION_NAME\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-user\n        key: OS_REGION_NAME\n  - name: SERVICE_OS_PROJECT_DOMAIN_NAME\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-user\n        key: OS_PROJECT_DOMAIN_NAME\n  - name: SERVICE_OS_PROJECT_NAME\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-user\n        key: OS_PROJECT_NAME\n  - name: SERVICE_OS_USER_DOMAIN_NAME\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-user\n        key: OS_USER_DOMAIN_NAME\n  - name: SERVICE_OS_USERNAME\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-user\n        key: OS_USERNAME\n  - name: SERVICE_OS_PASSWORD\n    valueFrom:\n      secretKeyRef:\n        name: example-keystone-user\n        key: OS_PASSWORD\n*/}}\n\n{{- define \"helm-toolkit.snippets.keystone_user_create_env_vars\" }}\n{{- $ksUserSecret := .ksUserSecret }}\n- name: SERVICE_OS_REGION_NAME\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_REGION_NAME\n- name: SERVICE_OS_PROJECT_DOMAIN_NAME\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_PROJECT_DOMAIN_NAME\n- name: SERVICE_OS_PROJECT_NAME\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_PROJECT_NAME\n- name: SERVICE_OS_USER_DOMAIN_NAME\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_USER_DOMAIN_NAME\n- name: SERVICE_OS_USERNAME\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_USERNAME\n- name: SERVICE_OS_PASSWORD\n  valueFrom:\n    secretKeyRef:\n      name: {{ $ksUserSecret }}\n      key: OS_PASSWORD\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_apparmor_configmap.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders a configmap used for loading custom AppArmor profiles.\nvalues: |\n  pod:\n    mandatory_access_control:\n      type: apparmor\n      configmap_apparmor: true\n      apparmor_profiles: |-\n        my_apparmor-v1.profile: |-\n          #include <tunables/global>\n          profile my-apparmor-v1 flags=(attach_disconnected,mediate_deleted) {\n            <profile_data>\n          }\nusage: |\n  {{ dict \"envAll\" . \"component\" \"myComponent\" | include \"helm-toolkit.snippets.kubernetes_apparmor_configmap\" }}\nreturn: |\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: releaseName-myComponent-apparmor\n  namespace: myNamespace\ndata:\n  my_apparmor-v1.profile: |-\n    #include <tunables/global>\n    profile my-apparmor-v1 flags=(attach_disconnected,mediate_deleted) {\n      <profile_data>\n    }\n*/}}\n{{- define \"helm-toolkit.snippets.kubernetes_apparmor_configmap\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $component := index . 
\"component\" -}}\n{{- if hasKey $envAll.Values.pod \"mandatory_access_control\" -}}\n{{- if hasKey $envAll.Values.pod.mandatory_access_control \"type\" -}}\n{{- if eq $envAll.Values.pod.mandatory_access_control.type \"apparmor\" -}}\n{{- if hasKey $envAll.Values.pod.mandatory_access_control \"configmap_apparmor\" -}}\n{{- if $envAll.Values.pod.mandatory_access_control.configmap_apparmor }}\n{{- $mapName := printf \"%s-%s-%s\" $envAll.Release.Name $component \"apparmor\" -}}\n{{- if $envAll.Values.conf.apparmor_profiles }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ $mapName }}\n  namespace: {{ $envAll.Release.Namespace }}\ndata:\n{{ $envAll.Values.conf.apparmor_profiles | toYaml | indent 2 }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_apparmor_loader_init_container.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders the init container used for apparmor loading.\nvalues: |\n  images:\n    tags:\n      apparmor_loader: my-repo.io/apparmor-loader:1.0.0\n  pod:\n    mandatory_access_control:\n      type: apparmor\n      configmap_apparmor: true\n      apparmor-loader: unconfined\nusage: |\n  {{ dict \"envAll\" . | include \"helm-toolkit.snippets.kubernetes_apparmor_loader_init_container\" }}\nreturn: |\n  - name: apparmor-loader\n    image: my-repo.io/apparmor-loader:1.0.0\n    args:\n      - /profiles\n    securityContext:\n      privileged: true\n    volumeMounts:\n      - name: sys\n        mountPath: /sys\n        readOnly: true\n      - name: includes\n        mountPath: /etc/apparmor.d\n        readOnly: true\n      - name: profiles\n        mountPath: /profiles\n        readOnly: true\n*/}}\n{{- define \"helm-toolkit.snippets.kubernetes_apparmor_loader_init_container\" -}}\n{{- $envAll := index . 
\"envAll\" -}}\n{{- if hasKey $envAll.Values.pod \"mandatory_access_control\" -}}\n{{- if hasKey $envAll.Values.pod.mandatory_access_control \"type\" -}}\n{{- if hasKey $envAll.Values.pod.mandatory_access_control \"configmap_apparmor\" -}}\n{{- if eq $envAll.Values.pod.mandatory_access_control.type \"apparmor\" -}}\n{{- if $envAll.Values.pod.mandatory_access_control.configmap_apparmor }}\n- name: apparmor-loader\n  image: {{ $envAll.Values.images.tags.apparmor_loader }}\n  args:\n    - /profiles\n  securityContext:\n    privileged: true\n  volumeMounts:\n    - name: sys\n      mountPath: /sys\n      readOnly: true\n    - name: includes\n      mountPath: /etc/apparmor.d\n      readOnly: true\n    - name: profiles\n      mountPath: /profiles\n      readOnly: true\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_apparmor_volumes.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders the volumes used by the apparmor loader.\nvalues: |\n  pod:\n    mandatory_access_control:\n      type: apparmor\n      configmap_apparmor: true\ninputs: |\n  envAll: \"Environment or Context.\"\n  component: \"Name of the component used for the name of configMap.\"\n  requireSys: \"Boolean. True if it needs the hostpath /sys in volumes.\"\nusage: |\n  {{ dict \"envAll\" . \"component\" \"keystone\" \"requireSys\" true | include \"helm-toolkit.snippets.kubernetes_apparmor_volumes\" }}\nreturn: |\n- name: sys\n  hostPath:\n    path: /sys\n- name: includes\n  hostPath:\n    path: /etc/apparmor.d\n- name: profiles\n  configMap:\n    name: RELEASENAME-keystone-apparmor\n    defaultMode: 0555\n*/}}\n{{- define \"helm-toolkit.snippets.kubernetes_apparmor_volumes\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $component := index . \"component\" -}}\n{{- $requireSys := index . 
\"requireSys\" | default false -}}\n{{- $configName := printf \"%s-%s-%s\" $envAll.Release.Name $component \"apparmor\" -}}\n{{- if hasKey $envAll.Values.pod \"mandatory_access_control\" -}}\n{{- if hasKey $envAll.Values.pod.mandatory_access_control \"type\" -}}\n{{- if hasKey $envAll.Values.pod.mandatory_access_control \"configmap_apparmor\" -}}\n{{- if eq $envAll.Values.pod.mandatory_access_control.type \"apparmor\" -}}\n{{- if $envAll.Values.pod.mandatory_access_control.configmap_apparmor }}\n{{- if $requireSys }}\n- name: sys\n  hostPath:\n    path: /sys\n{{- end }}\n- name: includes\n  hostPath:\n    path: /etc/apparmor.d\n- name: profiles\n  configMap:\n    name: {{ $configName | quote }}\n    defaultMode: 0555\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_container_security_context.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders securityContext for a Kubernetes container.\n  For container level, see here: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#securitycontext-v1-core\nexamples:\n  - values: |\n      pod:\n        security_context:\n          myApp:\n            container:\n              foo:\n                runAsUser: 34356\n                readOnlyRootFilesystem: true\n    usage: |\n      {{ dict \"envAll\" . \"application\" \"myApp\" \"container\" \"foo\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" }}\n    return: |\n      securityContext:\n        readOnlyRootFilesystem: true\n        runAsUser: 34356\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_container_security_context\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $application := index . \"application\" -}}\n{{- $container := index . \"container\" -}}\n{{- if hasKey $envAll.Values.pod \"security_context\" }}\n{{- if hasKey ( index $envAll.Values.pod.security_context ) $application }}\n{{- if hasKey ( index $envAll.Values.pod.security_context $application \"container\" ) $container }}\nsecurityContext:\n{{ toYaml ( index $envAll.Values.pod.security_context $application \"container\" $container ) | indent 2 }}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_entrypoint_init_container.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Returns a container definition for use with the kubernetes-entrypoint image.\nvalues: |\n  images:\n    tags:\n      dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal\n    pull_policy: IfNotPresent\n    local_registry:\n      active: true\n      exclude:\n        - dep_check\n  dependencies:\n    dynamic:\n      common:\n        local_image_registry:\n          jobs:\n            - calico-image-repo-sync\n          services:\n            - endpoint: node\n              service: local_image_registry\n    static:\n      calico_node:\n        services:\n          - endpoint: internal\n            service: etcd\n        custom_resources:\n          - apiVersion: argoproj.io/v1alpha1\n            kind: Workflow\n            name: wf-example\n            fields:\n              - key: \"status.phase\"\n                value: \"Succeeded\"\n  endpoints:\n    local_image_registry:\n      namespace: docker-registry\n      hosts:\n        default: localhost\n        node: localhost\n    etcd:\n      hosts:\n        default: etcd\n  # NOTE (portdirect): if the stanza, or a portion of it, under `pod` is not\n  # specified then the following will be used as defaults:\n  #  pod:\n  #    security_context:\n  #      kubernetes_entrypoint:\n  #        container:\n  #          kubernetes_entrypoint:\n  #            runAsUser: 65534\n  #            readOnlyRootFilesystem: true\n  #     
       allowPrivilegeEscalation: false\n  pod:\n    security_context:\n      kubernetes_entrypoint:\n        container:\n          kubernetes_entrypoint:\n            runAsUser: 0\n            readOnlyRootFilesystem: false\nusage: |\n  {{ tuple . \"calico_node\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" }}\nreturn: |\n  - name: init\n    image: \"quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal\"\n    imagePullPolicy: IfNotPresent\n    securityContext:\n      allowPrivilegeEscalation: false\n      readOnlyRootFilesystem: false\n      runAsUser: 0\n\n    env:\n      - name: POD_NAME\n        valueFrom:\n          fieldRef:\n            apiVersion: v1\n            fieldPath: metadata.name\n      - name: NAMESPACE\n        valueFrom:\n          fieldRef:\n            apiVersion: v1\n            fieldPath: metadata.namespace\n      - name: INTERFACE_NAME\n        value: eth0\n      - name: PATH\n        value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/\n      - name: DEPENDENCY_SERVICE\n        value: \"default:etcd,docker-registry:localhost\"\n      - name: DEPENDENCY_JOBS\n        value: \"calico-image-repo-sync\"\n      - name: DEPENDENCY_DAEMONSET\n        value: \"\"\n      - name: DEPENDENCY_CONTAINER\n        value: \"\"\n      - name: DEPENDENCY_POD_JSON\n        value: \"\"\n      - name: DEPENDENCY_CUSTOM_RESOURCE\n        value: \"[{\\\"apiVersion\\\":\\\"argoproj.io/v1alpha1\\\",\\\"kind\\\":\\\"Workflow\\\",\\\"namespace\\\":\\\"default\\\",\\\"name\\\":\\\"wf-example\\\",\\\"fields\\\":[{\\\"key\\\":\\\"status.phase\\\",\\\"value\\\":\\\"Succeeded\\\"}]}]\"\n    command:\n      - kubernetes-entrypoint\n    volumeMounts:\n      []\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_entrypoint_init_container._default_security_context\" -}}\nValues:\n  pod:\n    security_context:\n      kubernetes_entrypoint:\n        container:\n          kubernetes_entrypoint:\n            runAsUser: 
65534\n            readOnlyRootFilesystem: true\n            allowPrivilegeEscalation: false\n{{- end -}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" -}}\n{{- $envAll := index . 0 -}}\n{{- $component := index . 1 -}}\n{{- $mounts := index . 2 -}}\n\n{{- $_ := set $envAll.Values \"__kubernetes_entrypoint_init_container\" dict -}}\n{{- $_ := set $envAll.Values.__kubernetes_entrypoint_init_container \"deps\" dict -}}\n{{- if and ($envAll.Values.images.local_registry.active) (ne $component \"image_repo_sync\") -}}\n{{- if eq $component \"pod_dependency\" -}}\n{{- $_ := include \"helm-toolkit.utils.merge\" ( tuple $envAll.Values.__kubernetes_entrypoint_init_container.deps ( index $envAll.Values.pod_dependency ) $envAll.Values.dependencies.dynamic.common.local_image_registry ) -}}\n{{- else -}}\n{{- $_ := include \"helm-toolkit.utils.merge\" ( tuple $envAll.Values.__kubernetes_entrypoint_init_container.deps ( index $envAll.Values.dependencies.static $component ) $envAll.Values.dependencies.dynamic.common.local_image_registry ) -}}\n{{- end -}}\n{{- else -}}\n{{- if eq $component \"pod_dependency\" -}}\n{{- $_ := set $envAll.Values.__kubernetes_entrypoint_init_container \"deps\" ( index $envAll.Values.pod_dependency ) -}}\n{{- else -}}\n{{- $_ := set $envAll.Values.__kubernetes_entrypoint_init_container \"deps\" ( index $envAll.Values.dependencies.static $component ) -}}\n{{- end -}}\n{{- end -}}\n\n{{- if and ($envAll.Values.manifests.job_rabbit_init) (hasKey $envAll.Values.dependencies \"dynamic\") -}}\n{{- if $envAll.Values.dependencies.dynamic.job_rabbit_init -}}\n{{- if eq $component \"pod_dependency\" -}}\n{{- $_ := include \"helm-toolkit.utils.merge\" ( tuple $envAll.Values.__kubernetes_entrypoint_init_container.deps ( index $envAll.Values.pod_dependency ) (index $envAll.Values.dependencies.dynamic.job_rabbit_init $component) ) -}}\n{{- else -}}\n{{- $_ := include \"helm-toolkit.utils.merge\" ( tuple 
$envAll.Values.__kubernetes_entrypoint_init_container.deps ( index $envAll.Values.dependencies.static $component ) (index $envAll.Values.dependencies.dynamic.job_rabbit_init $component)) -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_jobs_filter\" (dict \"envAll\" $envAll \"deps\" $envAll.Values.__kubernetes_entrypoint_init_container.deps) | toString | fromYaml -}}\n\n{{- $deps := $envAll.Values.__kubernetes_entrypoint_init_container.deps }}\n{{- range $deps.custom_resources }}\n{{- $_ := set . \"namespace\" $envAll.Release.Namespace -}}\n{{- end -}}\n{{- $default_security_context := include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container._default_security_context\" . | fromYaml }}\n{{- $patchedEnvAll := mergeOverwrite $default_security_context $envAll }}\n- name: init\n{{ tuple $envAll \"dep_check\" | include \"helm-toolkit.snippets.image\" | indent 2 }}\n{{- dict \"envAll\" $patchedEnvAll \"application\" \"kubernetes_entrypoint\" \"container\" \"kubernetes_entrypoint\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 2 }}\n  env:\n    - name: POD_NAME\n      valueFrom:\n        fieldRef:\n          apiVersion: v1\n          fieldPath: metadata.name\n    - name: NAMESPACE\n      valueFrom:\n        fieldRef:\n          apiVersion: v1\n          fieldPath: metadata.namespace\n    - name: INTERFACE_NAME\n      value: eth0\n    - name: PATH\n      value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/\n    - name: DEPENDENCY_SERVICE\n      value: \"{{ tuple $deps.services $envAll | include \"helm-toolkit.utils.comma_joined_service_list\" }}\"\n{{- if $deps.jobs -}}\n  {{- if kindIs \"string\" (index $deps.jobs 0) }}\n    - name: DEPENDENCY_JOBS\n      value: \"{{ include \"helm-toolkit.utils.joinListWithComma\" $deps.jobs }}\"\n  {{- else }}\n    - name: DEPENDENCY_JOBS_JSON\n      value: {{- toJson $deps.jobs | quote -}}\n  {{- end -}}\n{{- end }}\n    - 
name: DEPENDENCY_DAEMONSET\n      value: \"{{ include \"helm-toolkit.utils.joinListWithComma\" $deps.daemonset }}\"\n    - name: DEPENDENCY_CONTAINER\n      value: \"{{ include \"helm-toolkit.utils.joinListWithComma\" $deps.container }}\"\n    - name: DEPENDENCY_POD_JSON\n      value: {{ if $deps.pod }}{{ toJson $deps.pod | quote }}{{ else }}\"\"{{ end }}\n    - name: DEPENDENCY_CUSTOM_RESOURCE\n      value: {{ if $deps.custom_resources }}{{ toJson $deps.custom_resources | quote }}{{ else }}\"\"{{ end }}\n  command:\n    - kubernetes-entrypoint\n  volumeMounts:\n{{ toYaml $mounts | indent 4 }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_kubectl_params.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_kubectl_params\" -}}\n{{- $envAll := index . 0 -}}\n{{- $application := index . 1 -}}\n{{- $component := index . 2 -}}\n{{ print \"-l application=\" $application \" -l component=\" $component }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_mandatory_access_control_annotation.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders mandatory access control annotations for a list of containers\n  driven by values.yaml. As of now, it can only generate an apparmor\n  annotation, but in the future could generate others.\nvalues: |\n  pod:\n    mandatory_access_control:\n      type: apparmor\n      myPodName:\n        myContainerName: localhost/myAppArmor\n        mySecondContainerName: localhost/secondProfile # optional\n        myThirdContainerName: localhost/thirdProfile # optional\nusage: |\n  {{ dict \"envAll\" . \"podName\" \"myPodName\" \"containerNames\" (list \"myContainerName\" \"mySecondContainerName\" \"myThirdContainerName\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" }}\nreturn: |\n  container.apparmor.security.beta.kubernetes.io/myContainerName: localhost/myAppArmor\n  container.apparmor.security.beta.kubernetes.io/mySecondContainerName: localhost/secondProfile\n  container.apparmor.security.beta.kubernetes.io/myThirdContainerName: localhost/thirdProfile\nnote: |\n  The number of container underneath is a variable arguments. It loops through\n  all the container names specified.\n*/}}\n{{- define \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $podName := index . \"podName\" -}}\n{{- $containerNames := index . 
\"containerNames\" -}}\n{{- if hasKey $envAll.Values.pod \"mandatory_access_control\" -}}\n{{- if hasKey $envAll.Values.pod.mandatory_access_control \"type\" -}}\n{{- $macType := $envAll.Values.pod.mandatory_access_control.type -}}\n{{- if $macType -}}\n{{- if eq $macType \"apparmor\" -}}\n{{- if hasKey $envAll.Values.pod.mandatory_access_control $podName -}}\n{{- range $name := $containerNames -}}\n{{- $apparmorProfile := index $envAll.Values.pod.mandatory_access_control $podName $name -}}\n{{- if $apparmorProfile }}\ncontainer.apparmor.security.beta.kubernetes.io/{{ $name }}: {{ $apparmorProfile }}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_metadata_labels.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders a set of standardised labels\nvalues: |\n  release_group: null\n  pod:\n    labels:\n      default:\n        label1.example.com: value\n      bar:\n        label2.example.com: bar\nusage: |\n  {{ tuple . \"foo\" \"bar\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" }}\nreturn: |\n  release_group: RELEASE-NAME\n  application: foo\n  component: bar\n  label1.example.com: value\n  label2.example.com: bar\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_metadata_labels\" -}}\n{{- $envAll := index . 0 -}}\n{{- $application := index . 1 -}}\n{{- $component := index . 2 -}}\n{{- $podValues := $envAll.Values.pod | default dict -}}\n{{- $labels := $podValues.labels | default dict -}}\nrelease_group: {{ $envAll.Values.release_group | default $envAll.Release.Name }}\napplication: {{ $application }}\ncomponent: {{ $component }}\n{{- if or $labels.include_app_kubernetes_io (not (hasKey $labels \"include_app_kubernetes_io\")) }}\napp.kubernetes.io/name: {{ $application }}\napp.kubernetes.io/component: {{ $component }}\napp.kubernetes.io/instance: {{ $envAll.Values.release_group | default $envAll.Release.Name }}\n{{- end -}}\n{{- if $labels }}\n{{- if hasKey $labels $component }}\n{{ index $podValues \"labels\" $component | toYaml }}\n{{- end -}}\n{{- if hasKey $labels \"default\" }}\n{{ $labels.default | toYaml }}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_pod_anti_affinity.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders kubernetes anti affinity rules, this function supports both hard\n  'requiredDuringSchedulingIgnoredDuringExecution' and soft\n  'preferredDuringSchedulingIgnoredDuringExecution' types.\nvalues: |\n  pod:\n    affinity:\n      anti:\n        topologyKey:\n          default: kubernetes.io/hostname\n        type:\n          default: requiredDuringSchedulingIgnoredDuringExecution\n        weight:\n          default: 10\nusage: |\n  {{ tuple . \"application_x\" \"component_y\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" }}\nreturn: |\n  podAntiAffinity:\n    requiredDuringSchedulingIgnoredDuringExecution:\n    - labelSelector:\n        matchExpressions:\n          - key: release_group\n            operator: In\n            values:\n            - RELEASE-NAME\n          - key: application\n            operator: In\n            values:\n            - application_x\n          - key: component\n            operator: In\n            values:\n            - component_y\n          topologyKey: kubernetes.io/hostname\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_pod_anti_affinity._match_expressions\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $application := index . \"application\" -}}\n{{- $component := index . 
\"component\" -}}\n{{- $expressionRelease := dict \"key\" \"release_group\" \"operator\" \"In\"  \"values\" ( list ( $envAll.Values.release_group | default $envAll.Release.Name ) ) -}}\n{{- $expressionApplication := dict \"key\" \"application\" \"operator\" \"In\"  \"values\" ( list $application ) -}}\n{{- $expressionComponent := dict \"key\" \"component\" \"operator\" \"In\"  \"values\" ( list $component ) -}}\n{{- list $expressionRelease $expressionApplication $expressionComponent | toYaml }}\n{{- end -}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" -}}\n{{- $envAll := index . 0 -}}\n{{- $application := index . 1 -}}\n{{- $component := index . 2 -}}\n{{- $antiAffinityType := index $envAll.Values.pod.affinity.anti.type $component | default $envAll.Values.pod.affinity.anti.type.default }}\n{{- $antiAffinityKey := index $envAll.Values.pod.affinity.anti.topologyKey $component | default $envAll.Values.pod.affinity.anti.topologyKey.default }}\npodAntiAffinity:\n{{- $matchExpressions := include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity._match_expressions\" ( dict \"envAll\" $envAll \"application\" $application \"component\" $component ) -}}\n{{- if eq $antiAffinityType \"preferredDuringSchedulingIgnoredDuringExecution\" }}\n  {{ $antiAffinityType }}:\n  - podAffinityTerm:\n      labelSelector:\n        matchExpressions:\n{{ $matchExpressions | indent 10 }}\n      topologyKey: {{ $antiAffinityKey }}\n{{- if  $envAll.Values.pod.affinity.anti.weight }}\n    weight: {{ index $envAll.Values.pod.affinity.anti.weight $component | default $envAll.Values.pod.affinity.anti.weight.default }}\n{{- else }}\n    weight: 10\n{{- end -}}\n{{- else if eq $antiAffinityType \"requiredDuringSchedulingIgnoredDuringExecution\" }}\n  {{ $antiAffinityType }}:\n  - labelSelector:\n      matchExpressions:\n{{ $matchExpressions | indent 8 }}\n    topologyKey: {{ $antiAffinityKey }}\n{{- else }}\n  {}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_pod_image_pull_secret.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders image pull secrets for a pod\nvalues: |\n  pod:\n    image_pull_secrets:\n      default:\n        - name: some-pull-secret\n      bar:\n        - name: another-pull-secret\nusage: |\n  {{ tuple . \"bar\" | include \"helm-toolkit.snippets.kubernetes_image_pull_secrets\" }}\nreturn: |\n  imagePullSecrets:\n    - name: some-pull-secret\n    - name: another-pull-secret\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_image_pull_secrets\" -}}\n{{- $envAll := index . 0 -}}\n{{- $application := index . 1 -}}\n{{- if ($envAll.Values.pod).image_pull_secrets }}\nimagePullSecrets:\n{{- if hasKey $envAll.Values.pod.image_pull_secrets $application }}\n{{ index $envAll.Values.pod \"image_pull_secrets\" $application | toYaml | indent 2 }}\n{{- end -}}\n{{- if hasKey $envAll.Values.pod.image_pull_secrets \"default\" }}\n{{ $envAll.Values.pod.image_pull_secrets.default | toYaml | indent 2 }}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_pod_priority_class.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders the priorityClassName for the pod spec of a component.\nexamples:\n  - values: |\n      pod:\n        priorityClassName:\n          designate_api: \"high-priority\"\n    usage: |\n      {{ tuple \"designate_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" }}\n    return: |\n      priorityClassName: \"high-priority\"\n  - values: |\n      pod:\n        priorityClassName: {}\n    usage: |\n      {{ tuple \"designate_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" }}\n    return: |\n      \"\"\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_pod_priority_class\" -}}\n{{- $component := index . 0 | replace \"-\" \"_\" -}}\n{{- $envAll := index . 1 -}}\n{{- $priorityClassName := dig \"priorityClassName\" $component false $envAll.Values.pod -}}\n{{- if $priorityClassName -}}\n{{- toYaml (dict \"priorityClassName\" $priorityClassName) -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_pod_rbac_roles.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_pod_rbac_roles\" -}}\n{{- $envAll := index . 0 -}}\n{{- $deps := index . 1 -}}\n{{- $namespace := index . 2 -}}\n{{- $saName := index . 3 | replace \"_\" \"-\" }}\n{{- $saNamespace := index . 4 -}}\n{{- $releaseName := $envAll.Release.Name }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $releaseName }}-{{ $saNamespace }}-{{ $saName }}\n  namespace: {{ $namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $releaseName }}-{{ $saNamespace }}-{{ $saName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $saName }}\n    namespace: {{ $saNamespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $releaseName }}-{{ $saNamespace }}-{{ $saName }}\n  namespace: {{ $namespace }}\nrules:\n  - apiGroups:\n      - \"\"\n      - extensions\n      - batch\n      - apps\n    verbs:\n      - get\n      - list\n    resources:\n      {{- range $k, $v := $deps -}}\n      {{ if eq $v \"daemonsets\" }}\n      - daemonsets\n      {{- end -}}\n      {{ if eq $v \"jobs\" }}\n      - jobs\n      {{- end -}}\n      {{ if or (eq $v \"pods\") (eq $v \"daemonsets\") (eq $v \"jobs\") }}\n      - pods\n      {{- end -}}\n      {{ if eq $v \"services\" }}\n      - services\n      - endpoints\n      {{- end -}}\n      {{ if eq $v \"secrets\" }}\n      - secrets\n      {{- end -}}\n 
     {{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_pod_rbac_serviceaccount.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" -}}\n{{- $envAll := index . 0 -}}\n{{- $component := index . 1 -}}\n{{- $saName := index . 2 -}}\n{{- $saNamespace := $envAll.Release.Namespace }}\n{{- $randomKey := randAlphaNum 32 }}\n{{- $allNamespace := dict $randomKey \"\" }}\n\n{{- $_ := set $envAll.Values \"__kubernetes_entrypoint_init_container\" dict -}}\n{{- $_ := set $envAll.Values.__kubernetes_entrypoint_init_container \"deps\" dict -}}\n{{- if and ($envAll.Values.images.local_registry.active) (ne $component \"image_repo_sync\") -}}\n{{- if eq $component \"pod_dependency\" -}}\n{{- $_ := include \"helm-toolkit.utils.merge\" ( tuple $envAll.Values.__kubernetes_entrypoint_init_container.deps ( index $envAll.Values.pod_dependency ) $envAll.Values.dependencies.dynamic.common.local_image_registry ) -}}\n{{- else -}}\n{{- $_ := include \"helm-toolkit.utils.merge\" ( tuple $envAll.Values.__kubernetes_entrypoint_init_container.deps ( index $envAll.Values.dependencies.static $component ) $envAll.Values.dependencies.dynamic.common.local_image_registry ) -}}\n{{- end -}}\n{{- else -}}\n{{- if eq $component \"pod_dependency\" -}}\n{{- $_ := set $envAll.Values.__kubernetes_entrypoint_init_container \"deps\" ( index $envAll.Values.pod_dependency ) -}}\n{{- else -}}\n{{- $_ := set $envAll.Values.__kubernetes_entrypoint_init_container \"deps\" ( index $envAll.Values.dependencies.static 
$component ) -}}\n{{- end -}}\n{{- end -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_jobs_filter\" (dict \"envAll\" $envAll \"deps\" $envAll.Values.__kubernetes_entrypoint_init_container.deps) | toString | fromYaml -}}\n{{- $deps := $envAll.Values.__kubernetes_entrypoint_init_container.deps }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: {{ $saName }}\n  namespace: {{ $saNamespace }}\n{{- if $envAll.Values.manifests.secret_registry }}\n{{- if $envAll.Values.endpoints.oci_image_registry.auth.enabled }}\nimagePullSecrets:\n  - name: {{ index $envAll.Values.secrets.oci_image_registry $envAll.Chart.Name }}\n{{- end -}}\n{{- end -}}\n{{- range $k, $v := $deps -}}\n{{- if eq $k \"services\" }}\n{{- range $serv := $v }}\n{{- $endpointMap := index $envAll.Values.endpoints $serv.service }}\n{{- $endpointNS := $endpointMap.namespace | default $saNamespace }}\n{{- if not (contains \"services\" ((index $allNamespace $endpointNS) | default \"\")) }}\n{{- $_ := set $allNamespace $endpointNS (printf \"%s%s\" \"services,\" ((index $allNamespace $endpointNS) | default \"\")) }}\n{{- end -}}\n{{- end -}}\n{{- else if and (eq $k \"jobs\") $v }}\n{{- $_ := set $allNamespace $saNamespace  (printf \"%s%s\" \"jobs,\" ((index $allNamespace $saNamespace) | default \"\")) }}\n{{- else if and (eq $k \"daemonset\") $v }}\n{{- $_ := set $allNamespace $saNamespace  (printf \"%s%s\" \"daemonsets,\" ((index $allNamespace $saNamespace) | default \"\")) }}\n{{- else if and (eq $k \"pod\") $v }}\n{{- $_ := set $allNamespace $saNamespace  (printf \"%s%s\" \"pods,\" ((index $allNamespace $saNamespace) | default \"\")) }}\n{{- else if and (eq $k \"secret\") $v }}\n{{- $_ := set $allNamespace $saNamespace  (printf \"%s%s\" \"secrets,\" ((index $allNamespace $saNamespace) | default \"\")) }}\n{{- end -}}\n{{- end -}}\n{{- $_ := unset $allNamespace $randomKey }}\n{{- range $ns, $vv := $allNamespace }}\n{{- $resourceList := (splitList \",\" (trimSuffix \",\" $vv)) }}\n{{- tuple 
$envAll $resourceList $ns $saName $saNamespace | include \"helm-toolkit.snippets.kubernetes_pod_rbac_roles\" }}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_pod_runtime_class.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders the runtimeClassName for the pod spec of a component.\nexamples:\n  - values: |\n      pod:\n        runtimeClassName:\n          designate_api: \"runtime-class\"\n    usage: |\n      {{ tuple \"designate_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" }}\n    return: |\n      runtimeClassName: \"runtime-class\"\n  - values: |\n      pod:\n        runtimeClassName: {}\n    usage: |\n      {{ tuple \"designate_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" }}\n    return: |\n      \"\"\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" -}}\n{{- $component := index . 0 | replace \"-\" \"_\" -}}\n{{- $envAll := index . 1 -}}\n{{- $runtimeClassName := dig \"runtimeClassName\" $component false $envAll.Values.pod -}}\n{{- if $runtimeClassName -}}\n{{- toYaml (dict \"runtimeClassName\" $runtimeClassName) -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_pod_security_context.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders securityContext for a Kubernetes pod.\n  For pod level, seurity context see here: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#podsecuritycontext-v1-core\nexamples:\n  - values: |\n      pod:\n        # NOTE: The 'user' key is deprecated, and will be removed shortly.\n        user:\n          myApp:\n            uid: 34356\n        security_context:\n          myApp:\n            pod:\n              runAsNonRoot: true\n    usage: |\n      {{ dict \"envAll\" . \"application\" \"myApp\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" }}\n    return: |\n      securityContext:\n        runAsUser: 34356\n        runAsNonRoot: true\n  - values: |\n      pod:\n        security_context:\n          myApp:\n            pod:\n              runAsUser: 34356\n              runAsNonRoot: true\n    usage: |\n      {{ dict \"envAll\" . \"application\" \"myApp\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" }}\n    return: |\n      securityContext:\n        runAsNonRoot: true\n        runAsUser: 34356\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_pod_security_context\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $application := index . 
\"application\" -}}\nsecurityContext:\n{{- if hasKey $envAll.Values.pod \"user\" }}\n{{- if hasKey $envAll.Values.pod.user $application }}\n{{- if hasKey ( index $envAll.Values.pod.user $application ) \"uid\" }}\n  runAsUser: {{ index $envAll.Values.pod.user $application \"uid\" }}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- if hasKey $envAll.Values.pod \"security_context\" }}\n{{- if hasKey ( index $envAll.Values.pod.security_context ) $application }}\n{{ toYaml ( index $envAll.Values.pod.security_context $application \"pod\" ) | indent 2 }}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_probes.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders kubernetes liveness and readiness probes for containers\nvalues: |\n  pod:\n    probes:\n      api:\n        default:\n          readiness:\n            enabled: true\n            params:\n              initialDelaySeconds: 30\n              timeoutSeconds: 30\nusage: |\n  {{- define \"probeTemplate\" }}\n  httpGet:\n    path: /status\n    port: 9090\n  {{- end }}\n  {{ dict \"envAll\" . \"component\" \"api\" \"container\" \"default\" \"type\" \"readiness\" \"probeTemplate\" (include \"probeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" }}\nreturn: |\n  readinessProbe:\n    httpGet:\n      path: /status\n      port: 9090\n    initialDelaySeconds: 30\n    timeoutSeconds: 30\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_probe\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $component := index . \"component\" -}}\n{{- $container := index . \"container\" -}}\n{{- $type := index . \"type\" -}}\n{{- $probeTemplate := index . \"probeTemplate\" -}}\n{{- $probeOpts := index $envAll.Values.pod.probes $component $container $type -}}\n{{- if $probeOpts.enabled -}}\n{{- $probeOverides := index $probeOpts \"params\" | default dict -}}\n{{ dict ( printf \"%sProbe\" $type ) (mergeOverwrite $probeTemplate $probeOverides ) | toYaml }}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_resources.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nNote: This function is deprecated and will be removed in the future.\n\nabstract: |\n  Renders kubernetes resource limits for pods\nvalues: |\n  pod:\n    resources:\n      enabled: true\n      api:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n          hugepages-1Gi: \"1Gi\"\n\nusage: |\n  {{ include \"helm-toolkit.snippets.kubernetes_resources\" ( tuple . .Values.pod.resources.api ) }}\nreturn: |\n  resources:\n    limits:\n      cpu: \"2000m\"\n      memory: \"1024Mi\"\n      hugepages-1Gi: \"1Gi\"\n    requests:\n      cpu: \"100m\"\n      memory: \"128Mi\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_resources\" -}}\n{{- $envAll := index . 0 -}}\n{{- $component := index . 1 -}}\n{{- if $envAll.Values.pod.resources.enabled -}}\nresources:\n{{ toYaml $component | trim | indent 2 }}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_seccomp_annotation.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders seccomp annotations for a list of containers driven by values.yaml.\nvalues: |\n  pod:\n    seccomp:\n      myPodName:\n        myContainerName: localhost/mySeccomp\n        mySecondContainerName: localhost/secondProfile # optional\n        myThirdContainerName: localhost/thirdProfile # optional\nusage: |\n  {{ dict \"envAll\" . \"podName\" \"myPodName\" \"containerNames\" (list \"myContainerName\" \"mySecondContainerName\" \"myThirdContainerName\") | include \"helm-toolkit.snippets.kubernetes_seccomp_annotation\" }}\nreturn: |\n  container.seccomp.security.alpha.kubernetes.io/myContainerName: localhost/mySeccomp\n  container.seccomp.security.alpha.kubernetes.io/mySecondContainerName: localhost/secondProfile\n  container.seccomp.security.alpha.kubernetes.io/myThirdContainerName: localhost/thirdProfile\nnote: |\n  The number of container underneath is a variable arguments. It loops through\n  all the container names specified.\n*/}}\n{{- define \"helm-toolkit.snippets.kubernetes_seccomp_annotation\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $podName := index . \"podName\" -}}\n{{- $containerNames := index . 
\"containerNames\" -}}\n{{- if hasKey (index $envAll.Values.pod \"seccomp\") $podName -}}\n{{- range $name := $containerNames -}}\n{{- $seccompProfile := index $envAll.Values.pod.seccomp $podName $name -}}\n{{- if $seccompProfile }}\ncontainer.seccomp.security.alpha.kubernetes.io/{{ $name }}: {{ $seccompProfile }}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_tolerations.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders kubernetes tolerations for pods\nvalues: |\n  pod:\n    tolerations:\n      api:\n        enabled: true\n        tolerations:\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n        - key: node-role.kubernetes.io/node\n          operator: Exists\n\nusage: |\n  {{ include \"helm-toolkit.snippets.kubernetes_tolerations\" ( tuple . .Values.pod.tolerations.api ) }}\nreturn: |\n  tolerations:\n  - key: node-role.kubernetes.io/master\n    operator: Exists\n  - key: node-role.kubernetes.io/node\n    operator: Exists\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_tolerations\" -}}\n{{- $envAll := index . 0 -}}\n{{- $component := index . 1 -}}\n{{- $pod := index $envAll.Values.pod.tolerations $component }}\ntolerations:\n{{ toYaml $pod.tolerations }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_upgrades_daemonset.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" -}}\n{{- $envAll := index . 0 -}}\n{{- $component := index . 1 -}}\n{{- $upgradeMap := index $envAll.Values.pod.lifecycle.upgrades.daemonsets $component -}}\n{{- $pod_replacement_strategy := $envAll.Values.pod.lifecycle.upgrades.daemonsets.pod_replacement_strategy -}}\n{{- with $upgradeMap -}}\n{{- if .enabled }}\nminReadySeconds: {{ .min_ready_seconds }}\nupdateStrategy:\n  type: {{ $pod_replacement_strategy }}\n  {{- if $pod_replacement_strategy }}\n  {{- if eq $pod_replacement_strategy \"RollingUpdate\" }}\n  rollingUpdate:\n    maxUnavailable: {{ .max_unavailable }}\n  {{- end }}\n  {{- end }}\n{{- end }}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_upgrades_deployment.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" -}}\n{{- $envAll := index . 0 -}}\n{{- with $envAll.Values.pod.lifecycle.upgrades.deployments -}}\nrevisionHistoryLimit: {{ .revision_history }}\nstrategy:\n  type: {{ .pod_replacement_strategy }}\n  {{- if eq .pod_replacement_strategy \"RollingUpdate\" }}\n  rollingUpdate:\n    maxUnavailable: {{ .rolling_update.max_unavailable }}\n    maxSurge: {{ .rolling_update.max_surge }}\n  {{- end }}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_kubernetes_upgrades_statefulset.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders upgradeStrategy configuration for Kubernetes statefulsets.\n  See: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets\n  Types:\n    - RollingUpdate (default)\n    - OnDelete\n  Partitions:\n    - Stage updates to a statefulset by keeping pods at current version while\n      allowing mutations to statefulset's .spec.template\nvalues: |\n  pod:\n    lifecycle:\n      upgrades:\n        statefulsets:\n          pod_replacement_strategy: RollingUpdate\n          partition: 2\nusage: |\n  {{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_statefulset\" | indent 2 }}\nreturn: |\n  updateStrategy:\n    type: RollingUpdate\n    rollingUpdate:\n      partition: 2\n*/}}\n\n{{- define \"helm-toolkit.snippets.kubernetes_upgrades_statefulset\" -}}\n{{- $envAll := index . 0 -}}\n{{- with $envAll.Values.pod.lifecycle.upgrades.statefulsets -}}\nupdateStrategy:\n  type: {{ .pod_replacement_strategy }}\n  {{ if .partition -}}\n  rollingUpdate:\n    partition: {{ .partition }}\n  {{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_mon_host_from_k8s_ep.sh.tpl",
    "content": "{{- define \"helm-toolkit.snippets.mon_host_from_k8s_ep\" -}}\n{{/*\n\nInserts a bash function definition mon_host_from_k8s_ep() which can be used\nto construct a mon_hosts value from the given namespaced endpoint.\n\nUsage (e.g. in _script.sh.tpl):\n    #!/bin/bash\n\n    : \"${NS:=ceph}\"\n    : \"${EP:=ceph-mon-discovery}\"\n\n    {{ include \"helm-toolkit.snippets.mon_host_from_k8s_ep\" . }}\n\n    MON_HOST=$(mon_host_from_k8s_ep \"$NS\" \"$EP\")\n\n    if [ -z \"$MON_HOST\" ]; then\n        # deal with failure\n    else\n        sed -i -e \"s/^mon_host = /mon_host = $MON_HOST/\" /etc/ceph/ceph.conf\n    fi\n*/}}\n{{`\n# Construct a mon_hosts value from the given namespaced endpoint\n# IP x.x.x.x with port p named \"mon-msgr2\" will appear as [v2:x.x.x.x/p/0]\n# IP x.x.x.x with port q named \"mon\" will appear as [v1:x.x.x.x/q/0]\n# IP x.x.x.x with ports p and q will appear as [v2:x.x.x.x/p/0,v1:x.x.x.x/q/0]\n# The entries for all IPs will be joined with commas\nmon_host_from_k8s_ep() {\n  local ns=$1\n  local ep=$2\n\n  if [ -z \"$ns\" ] || [ -z \"$ep\" ]; then\n    return 1\n  fi\n\n  # We don't want shell expansion for the go-template expression\n  # shellcheck disable=SC2016\n  kubectl get endpoints -n \"$ns\" \"$ep\" -o go-template='\n    {{- $sep := \"\" }}\n    {{- range $_,$s := .subsets }}\n      {{- $v2port := 0 }}\n      {{- $v1port := 0 }}\n      {{- range $_,$port := index $s \"ports\" }}\n        {{- if (eq $port.name \"mon-msgr2\") }}\n          {{- $v2port = $port.port }}\n        {{- else if (eq $port.name \"mon\") }}\n          {{- $v1port = $port.port }}\n        {{- end }}\n      {{- end }}\n      {{- range $_,$address := index $s \"addresses\" }}\n        {{- $v2endpoint := printf \"v2:%s:%d/0\" $address.ip $v2port }}\n        {{- $v1endpoint := printf \"v1:%s:%d/0\" $address.ip $v1port }}\n        {{- if (and $v2port $v1port) }}\n          {{- printf \"%s[%s,%s]\" $sep $v2endpoint $v1endpoint }}\n          {{- $sep = \",\" 
}}\n        {{- else if $v2port }}\n          {{- printf \"%s[%s]\" $sep $v2endpoint }}\n          {{- $sep = \",\" }}\n        {{- else if $v1port }}\n          {{- printf \"%s[%s]\" $sep $v1endpoint }}\n          {{- $sep = \",\" }}\n        {{- end }}\n      {{- end }}\n    {{- end }}'\n}\n`}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_prometheus_pod_annotations.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# Appends annotations for configuring prometheus scrape jobs via pod\n# annotations. The required annotations are:\n# * `prometheus.io/scrape`: Only scrape pods that have a value of `true`\n# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.\n# * `prometheus.io/port`: Scrape the pod on the indicated port instead of the\n# pod's declared ports (default is a port-free target if none are declared).\n\n{{- define \"helm-toolkit.snippets.prometheus_pod_annotations\" -}}\n{{- $config := index . 0 -}}\n{{- if $config.scrape }}\nprometheus.io/scrape: {{ $config.scrape | quote }}\n{{- end }}\n{{- if $config.path }}\nprometheus.io/path: {{ $config.path | quote }}\n{{- end }}\n{{- if $config.port }}\nprometheus.io/port: {{ $config.port | quote }}\n{{- end }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_prometheus_service_annotations.tpl",
    "content": "{{/*\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n   http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# Appends annotations for configuring prometheus scrape endpoints via\n# annotations. The required annotations are:\n# * `prometheus.io/scrape`: Only scrape services that have a value of `true`\n# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need\n# to set this to `https` & most likely set the `tls_config` of the scrape config.\n# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.\n# * `prometheus.io/port`: If the metrics are exposed on a different port to the\n# service then set this appropriately.\n\n{{- define \"helm-toolkit.snippets.prometheus_service_annotations\" -}}\n{{- $config := index . 0 -}}\n{{- if $config.scrape }}\nprometheus.io/scrape: {{ $config.scrape | quote }}\n{{- end }}\n{{- if $config.scheme }}\nprometheus.io/scheme: {{ $config.scheme | quote }}\n{{- end }}\n{{- if $config.path }}\nprometheus.io/path: {{ $config.path | quote }}\n{{- end }}\n{{- if $config.port }}\nprometheus.io/port: {{ $config.port | quote }}\n{{- end }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_release_uuid.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Reneders an attonation key and value for a release\nvalues: |\n  release_uuid: null\nusage: |\n  {{ tuple . | include \"helm-toolkit.snippets.release_uuid\" }}\nreturn: |\n  \"openstackhelm.openstack.org/release_uuid\": \"\"\n*/}}\n\n{{- define \"helm-toolkit.snippets.release_uuid\" -}}\n{{- $envAll := index . 0 -}}\n\"openstackhelm.openstack.org/release_uuid\": {{ $envAll.Values.release_uuid | default \"\" | quote }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_rgw_s3_admin_env_vars.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.snippets.rgw_s3_admin_env_vars\" }}\n{{- $s3AdminSecret := .s3AdminSecret }}\n- name: S3_ADMIN_USERNAME\n  valueFrom:\n    secretKeyRef:\n      name: {{ $s3AdminSecret }}\n      key: S3_ADMIN_USERNAME\n- name: S3_ADMIN_ACCESS_KEY\n  valueFrom:\n    secretKeyRef:\n      name: {{ $s3AdminSecret }}\n      key: S3_ADMIN_ACCESS_KEY\n- name: S3_ADMIN_SECRET_KEY\n  valueFrom:\n    secretKeyRef:\n      name: {{ $s3AdminSecret }}\n      key: S3_ADMIN_SECRET_KEY\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_rgw_s3_bucket_user_env_vars_rook.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.snippets.rgw_s3_bucket_user_env_vars_rook\" }}\n{{- range $s3Bucket := .Values.storage.s3.buckets }}\n- name: {{ printf \"%s_S3_ACCESS_KEY\" ($s3Bucket.client | replace \"-\" \"_\" | upper) }}\n  valueFrom:\n    secretKeyRef:\n      name: {{ $s3Bucket.name }}\n      key: AWS_ACCESS_KEY_ID\n- name: {{ printf \"%s_S3_SECRET_KEY\" ($s3Bucket.client | replace \"-\" \"_\" | upper) }}\n  valueFrom:\n    secretKeyRef:\n      name: {{ $s3Bucket.name }}\n      key: AWS_SECRET_ACCESS_KEY\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_rgw_s3_secret_creds.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.snippets.rgw_s3_secret_creds\" }}\n{{- range $client, $config := .Values.storage.s3.clients -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ printf \"%s-s3-user-secret\" ( $client | replace \"_\" \"-\" | lower ) }}\ntype: Opaque\ndata:\n{{- range $key, $value := $config.auth }}\n  {{ $key | upper }}: {{ $value | toString | b64enc}}\n{{- end }}\n\n{{ end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_rgw_s3_user_env_vars.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.snippets.rgw_s3_user_env_vars\" }}\n{{- range $client, $user := .Values.storage.s3.clients }}\n{{- $s3secret := printf \"%s-s3-user-secret\" ( $client | replace \"_\" \"-\" | lower ) }}\n- name: {{ printf \"%s_S3_USERNAME\" ($client | replace \"-\" \"_\" | upper) }}\n  valueFrom:\n    secretKeyRef:\n      name: {{ $s3secret }}\n      key: USERNAME\n- name: {{ printf \"%s_S3_ACCESS_KEY\" ($client | replace \"-\" \"_\" | upper) }}\n  valueFrom:\n    secretKeyRef:\n      name: {{ $s3secret }}\n      key: ACCESS_KEY\n- name: {{ printf \"%s_S3_SECRET_KEY\" ($client | replace \"-\" \"_\" | upper) }}\n  valueFrom:\n    secretKeyRef:\n      name: {{ $s3secret }}\n      key: SECRET_KEY\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_service_params.tpl",
    "content": "{{/*\nCopyright 2017 The Openstack-Helm Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{/*\nabstract: |\n  Inserts kubernetes service parameters from values as is.\nvalues: |\n  network:\n    serviceExample:\n      service:\n        type: loadBalancer\n        loadBalancerIP: 1.1.1.1\nusage: |\n  ---\n  apiVersion: v1\n  kind: Service\n  metadata:\n    name: 'serviceExample'\n  spec:\n    ports:\n    - name: s-example\n      port: 1111\n  {{ .Values.network.serviceExample | include \"helm-toolkit.snippets.service_params\" | indent 2 }}\nreturn: |\n  type: loadBalancer\n  loadBalancerIP: 1.1.1.1\n*/}}\n\n{{- define \"helm-toolkit.snippets.service_params\" }}\n{{- $serviceParams := dict }}\n{{- if hasKey . \"service\" }}\n{{- $serviceParams = .service }}\n{{- end }}\n{{- if hasKey . \"node_port\" }}\n{{- if hasKey .node_port \"enabled\" }}\n{{- if .node_port.enabled }}\n{{- $_ := set $serviceParams \"type\" \"NodePort\" }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- if hasKey . \"external_policy_local\" }}\n{{- if .external_policy_local }}\n{{- $_ := set $serviceParams \"externalTrafficPolicy\" \"Local\" }}\n{{- end }}\n{{- end }}\n{{- if $serviceParams }}\n{{- $serviceParams | toYaml }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_tls_volume.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{/*\nabstract: |\n  Renders a secret volume for tls.\n\n  Dictionary Parameters:\n    enabled: boolean check if you want to conditional disable this snippet (optional)\n    name: name of the volume (required)\n    secretName: name of a kuberentes/tls secret, if not specified, use the volume name (optional)\n\nvalues: |\n  manifests:\n    certificates: true\n\nusage: |\n  {{- $opts := dict \"enabled\" \"true\" \"name\" \"glance-tls-api\" -}}\n  {{- $opts | include \"helm-toolkit.snippets.tls_volume\" -}}\n\nreturn: |\n  - name: glance-tls-api\n    secret:\n      secretName: glance-tls-api\n      defaultMode: 292\n*/}}\n{{- define \"helm-toolkit.snippets.tls_volume\" }}\n{{- $enabled := index . \"enabled\" -}}\n{{- $name := index . \"name\" -}}\n{{- $secretName := index . \"secretName\" | default $name -}}\n{{- if and $enabled (ne $name \"\") }}\n- name: {{ $name }}\n  secret:\n    secretName: {{ $secretName }}\n    defaultMode: 292\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_tls_volume_mount.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{/*\nabstract: |\n  Renders a volume mount for TLS key, cert and CA.\n\n  Dictionary Parameters:\n    enabled: boolean check if you want to conditional disable this snippet (optional)\n    name: name that of the volume and should match the volume name (required)\n    path: path to place tls.crt tls.key ca.crt, do not suffix with '/' (required)\n    certs: a tuple containing a nonempty subset of {tls.crt, tls.key, ca.crt}.\n          the default is the full set. 
(optional)\n\nvalues: |\n  manifests:\n    certificates: true\n\nusage: |\n  {{- $opts := dict \"enabled\" .Values.manifests.certificates \"name\" \"glance-tls-api\" \"path\" \"/etc/glance/certs\" -}}\n  {{- $opts | include \"helm-toolkit.snippets.tls_volume_mount\" -}}\n\nreturn: |\n  - name: glance-tls-api\n    mountPath: /etc/glance/certs/tls.crt\n    subPath: tls.crt\n    readOnly: true\n  - name: glance-tls-api\n    mountPath: /etc/glance/certs/tls.key\n    subPath: tls.key\n    readOnly: true\n  - name: glance-tls-api\n    mountPath: /etc/glance/certs/ca.crt\n    subPath: ca.crt\n    readOnly: true\n\nabstract: |\n  This mounts a specific issuing CA only for service validation\n\nusage: |\n  {{- $opts := dict \"enabled\" .Values.manifests.certificates \"name\" \"glance-tls-api\" \"ca\" true -}}\n  {{- $opts | include \"helm-toolkit.snippets.tls_volume_mount\" -}}\n\nreturn: |\n  - name: glance-tls-api\n    mountPath: /etc/ssl/certs/openstack-helm.crt\n    subPath: ca.crt\n    readOnly: true\n*/}}\n{{- define \"helm-toolkit.snippets.tls_volume_mount\" }}\n{{- $enabled := index . \"enabled\" -}}\n{{- $name := index . \"name\" -}}\n{{- $path := index . \"path\" | default \"\" -}}\n{{- $certs := index . \"certs\" | default ( tuple \"tls.crt\" \"tls.key\" \"ca.crt\" ) }}\n{{- if $enabled }}\n{{- if and (eq $path \"\") (ne $name \"\") }}\n- name: {{ $name }}\n  mountPath: \"/etc/ssl/certs/openstack-helm.crt\"\n  subPath: ca.crt\n  readOnly: true\n{{- else }}\n{{- if ne $name \"\" }}\n{{- range $key, $value := $certs }}\n- name: {{ $name }}\n  mountPath: {{ printf \"%s/%s\" $path $value }}\n  subPath: {{ $value }}\n  readOnly: true\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/snippets/_values_template_renderer.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Renders out configuration sections into a format suitable for incorporation\n  into a config-map. Allowing various forms of input to be rendered out as\n  appropriate.\nvalues: |\n  conf:\n    inputs:\n      - foo\n      - bar\n    some:\n      config_to_render: |\n        #We can use all of gotpl here: eg macros, ranges etc.\n        {{ include \"helm-toolkit.utils.joinListWithComma\" .Values.conf.inputs }}\n      config_to_complete:\n        #here we can fill out params, but things need to be valid yaml as input\n        '{{ .Release.Name }}': '{{ printf \"%s-%s\" .Release.Namespace \"namespace\" }}'\n      static_config:\n        #this is just passed though as yaml to the configmap\n        foo: bar\nusage: |\n  {{- $envAll := . 
}}\n  ---\n  apiVersion: v1\n  kind: ConfigMap\n  metadata:\n    name: application-etc\n  data:\n  {{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.some.config_to_render \"key\" \"config_to_render.conf\") | indent 2 }}\n  {{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.some.config_to_complete \"key\" \"config_to_complete.yaml\") | indent 2 }}\n  {{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.some.static_config \"key\" \"static_config.yaml\") | indent 2 }}\nreturn: |\n  ---\n  apiVersion: v1\n  kind: ConfigMap\n  metadata:\n    name: application-etc\n  data:\n    config_to_render.conf: |\n      #We can use all of gotpl here: eg macros, ranges etc.\n      foo,bar\n\n    config_to_complete.yaml: |\n      'RELEASE-NAME': 'default-namespace'\n\n    static_config.yaml: |\n      foo: bar\n*/}}\n\n{{- define \"helm-toolkit.snippets.values_template_renderer\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $template := index . \"template\" -}}\n{{- $key := index . \"key\" -}}\n{{- $format := index . \"format\" | default \"configMap\" -}}\n{{- with $envAll -}}\n{{- $templateRendered := tpl ( $template | toYaml ) . }}\n{{- if eq $format \"Secret\" }}\n{{- if hasPrefix \"|\\n\" $templateRendered }}\n{{ $key }}: {{ regexReplaceAllLiteral \"\\n  \" ( $templateRendered | trimPrefix \"|\\n\" | trimPrefix \"  \" ) \"\\n\" | b64enc }}\n{{- else }}\n{{ $key }}: {{ $templateRendered | b64enc }}\n{{- end -}}\n{{- else }}\n{{- if hasPrefix \"|\\n\" $templateRendered }}\n{{ $key }}: |\n{{ regexReplaceAllLiteral \"\\n  \" ( $templateRendered | trimPrefix \"|\\n\" | trimPrefix \"  \" ) \"\\n\" | indent 2 }}\n{{- else }}\n{{ $key }}: |\n{{ $templateRendered | indent 2 }}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/tls/_tls_generate_certs.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Produces a certificate from a certificate authority. If the \"encode\" parameter\n  is true, base64 encode the values for inclusion in a Kubernetes secret.\nvalues: |\n  test:\n    hosts:\n      names:\n        - barbican.openstackhelm.example\n        - barbican.openstack.svc.cluster.local\n      ips:\n        - 127.0.0.1\n        - 192.168.0.1\n    life: 3\n    # Use ca.crt and ca.key to build a customized ca, if they are provided.\n    # Use hosts.names[0] and life to auto-generate a ca, if ca is not provided.\n    ca:\n      crt: |\n        <CA CRT>\n      key: |\n        <CA PRIVATE KEY>\nusage: |\n  {{ include \"helm-toolkit.utils.tls_generate_certs\" (dict \"params\" .Values.test) }}\nreturn: |\n  ca: |\n    <CA CRT>\n  crt: |\n    <CRT>\n  exp: 2018-09-01T10:56:07.895392915-00:00\n  key: |\n    <CRT PRIVATE KEY>\n*/}}\n\n{{- define \"helm-toolkit.utils.tls_generate_certs\" -}}\n{{- $params := index . \"params\" -}}\n{{- $encode := index . 
\"encode\" | default false -}}\n{{- $local := dict -}}\n\n{{- $_hosts := $params.hosts.names | default list }}\n{{- if kindIs \"string\" $params.hosts.names }}\n{{- $_ := set $local \"certHosts\" (list $params.hosts.names) }}\n{{- else }}\n{{- $_ := set $local \"certHosts\" $_hosts }}\n{{- end }}\n\n{{- $_ips := $params.hosts.ips | default list }}\n{{- if kindIs \"string\" $params.hosts.ips }}\n{{- $_ := set $local \"certIps\" (list $params.hosts.ips) }}\n{{- else }}\n{{- $_ := set $local \"certIps\" $_ips }}\n{{- end }}\n\n{{- if hasKey $params \"ca\" }}\n{{- if and (hasKey $params.ca \"crt\") (hasKey $params.ca \"key\") }}\n{{- $ca := buildCustomCert ($params.ca.crt | b64enc ) ($params.ca.key | b64enc ) }}\n{{- $_ := set $local \"ca\" $ca }}\n{{- end }}\n{{- else }}\n{{- $ca := genCA (first $local.certHosts) (int $params.life) }}\n{{- $_ := set $local \"ca\" $ca }}\n{{- end }}\n\n{{- $expDate := date_in_zone \"2006-01-02T15:04:05Z07:00\" ( date_modify (printf \"+%sh\" (mul $params.life 24 |toString)) now ) \"UTC\" }}\n{{- $rawCert := genSignedCert (first $local.certHosts) ($local.certIps) ($local.certHosts) (int $params.life) $local.ca }}\n{{- $certificate := dict -}}\n{{- if $encode -}}\n{{- $_ := b64enc $rawCert.Cert | set $certificate \"crt\" -}}\n{{- $_ := b64enc $rawCert.Key | set $certificate \"key\" -}}\n{{- $_ := b64enc $local.ca.Cert | set $certificate \"ca\" -}}\n{{- $_ := b64enc $local.ca.Key | set $certificate \"caKey\" -}}\n{{- $_ := b64enc $expDate | set $certificate \"exp\" -}}\n{{- else -}}\n{{- $_ := set $certificate \"crt\" $rawCert.Cert -}}\n{{- $_ := set $certificate \"key\" $rawCert.Key -}}\n{{- $_ := set $certificate \"ca\" $local.ca.Cert -}}\n{{- $_ := set $certificate \"caKey\" $local.ca.Key -}}\n{{- $_ := set $certificate \"exp\" $expDate -}}\n{{- end -}}\n{{- $certificate | toYaml }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_comma_joined_service_list.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Returns a comma separated list of namespace:service pairs.\nvalues: |\n  dependencies:\n    static:\n      api:\n        services:\n          - endpoint: internal\n            service: oslo_cache\n          - endpoint: internal\n            service: oslo_db\n  endpoints:\n    oslo_db:\n      namespace: foo\n      hosts:\n        default: mariadb\n    oslo_cache:\n      namespace: bar\n      hosts:\n        default: memcache\nusage: |\n  {{ tuple .Values.dependencies.static.api.services . | include \"helm-toolkit.utils.comma_joined_service_list\" }}\nreturn: |\n  bar:memcache,foo:mariadb\n*/}}\n\n{{- define \"helm-toolkit.utils.comma_joined_service_list\" -}}\n{{- $deps := index . 0 -}}\n{{- $envAll := index . 1 -}}\n{{- range $k, $v := $deps -}}{{- if $k -}},{{- end -}}{{ tuple $v.service $v.endpoint $envAll | include \"helm-toolkit.endpoints.service_name_endpoint_with_namespace_lookup\" }}{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_configmap_templater.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.utils.configmap_templater\" }}\n{{- $keyRoot := index . 0 -}}\n{{- $configTemplate := index . 1 -}}\n{{- $context := index . 2 -}}\n{{ if $keyRoot.override -}}\n{{ $keyRoot.override | indent 4 }}\n{{- else -}}\n{{- if $keyRoot.prefix -}}\n{{ $keyRoot.prefix | indent 4 }}\n{{- end }}\n{{ tuple $configTemplate $context | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n{{- if $keyRoot.append -}}\n{{ $keyRoot.append | indent 4 }}\n{{- end }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_daemonset_overrides.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.utils.daemonset_overrides\" }}\n  {{- $daemonset := index . 0 }}\n  {{- $daemonset_yaml := index . 1 }}\n  {{- $configmap_include := index . 2 }}\n  {{- $configmap_name := index . 3 }}\n  {{- $context := index . 4 }}\n  {{- $_ := unset $context \".Files\" }}\n  {{- $daemonset_root_name := printf (print $context.Chart.Name \"_\" $daemonset) }}\n  {{- $_ := set $context.Values \"__daemonset_list\" list }}\n  {{- $_ := set $context.Values \"__default\" dict }}\n  {{- if hasKey $context.Values.conf \"overrides\" }}\n    {{- range $key, $val := $context.Values.conf.overrides }}\n\n      {{- if eq $key $daemonset_root_name }}\n        {{- range $type, $type_data := . }}\n\n          {{- if eq $type \"hosts\" }}\n            {{- range $host_data := . }}\n              {{/* dictionary that will contain all info needed to generate this\n              iteration of the daemonset */}}\n              {{- $current_dict := dict }}\n\n              {{/* set daemonset name */}}\n              {{/* Note: long hostnames can cause the 63 char name limit to be\n              exceeded. 
Truncate the hostname if hostname > 20 char */}}\n              {{- if gt (len $host_data.name) 20 }}\n                {{- $_ := set $current_dict \"name\" (substr 0 20 $host_data.name) }}\n              {{- else }}\n                {{- $_ := set $current_dict \"name\" $host_data.name }}\n              {{- end }}\n\n              {{/* apply overrides */}}\n              {{- $override_conf_copy := $host_data.conf }}\n              {{/* Deep copy to prevent https://storyboard.openstack.org/#!/story/2005936 */}}\n              {{- $root_conf_copy := omit ($context.Values.conf | toYaml | fromYaml) \"overrides\" }}\n              {{- $merged_dict := mergeOverwrite $root_conf_copy $override_conf_copy }}\n              {{- $root_conf_copy2 := dict \"conf\" $merged_dict }}\n              {{- $context_values := omit (omit ($context.Values | toYaml | fromYaml) \"conf\") \"__daemonset_list\" }}\n              {{- $root_conf_copy3 := mergeOverwrite $context_values $root_conf_copy2 }}\n              {{- $root_conf_copy4 := dict \"Values\" $root_conf_copy3 }}\n              {{- $_ := set $current_dict \"nodeData\" $root_conf_copy4 }}\n\n              {{/* Schedule to this host explicitly. 
*/}}\n              {{- $nodeSelector_dict := dict }}\n\n              {{- $_ := set $nodeSelector_dict \"key\" \"kubernetes.io/hostname\" }}\n              {{- $_ := set $nodeSelector_dict \"operator\" \"In\" }}\n\n              {{- $values_list := list $host_data.name }}\n              {{- $_ := set $nodeSelector_dict \"values\" $values_list }}\n\n              {{- $list_aggregate := list $nodeSelector_dict }}\n              {{- $_ := set $current_dict \"matchExpressions\" $list_aggregate }}\n\n              {{/* store completed daemonset entry/info into global list */}}\n              {{- $list_aggregate := append $context.Values.__daemonset_list $current_dict }}\n              {{- $_ := set $context.Values \"__daemonset_list\" $list_aggregate }}\n\n            {{- end }}\n          {{- end }}\n\n          {{- if eq $type \"labels\" }}\n            {{- $_ := set $context.Values \"__label_list\" . }}\n            {{- range $label_data := . }}\n              {{/* dictionary that will contain all info needed to generate this\n              iteration of the daemonset. 
*/}}\n              {{- $_ := set $context.Values \"__current_label\" dict }}\n\n              {{/* set daemonset name */}}\n              {{- $_ := set $context.Values.__current_label \"name\" $label_data.label.key }}\n\n              {{/* apply overrides */}}\n              {{- $override_conf_copy := $label_data.conf }}\n              {{/* Deep copy to prevent https://storyboard.openstack.org/#!/story/2005936 */}}\n              {{- $root_conf_copy := omit ($context.Values.conf | toYaml | fromYaml) \"overrides\" }}\n              {{- $merged_dict := mergeOverwrite $root_conf_copy $override_conf_copy }}\n              {{- $root_conf_copy2 := dict \"conf\" $merged_dict }}\n              {{- $context_values := omit (omit ($context.Values | toYaml | fromYaml) \"conf\") \"__daemonset_list\" }}\n              {{- $root_conf_copy3 := mergeOverwrite $context_values $root_conf_copy2 }}\n              {{- $root_conf_copy4 := dict \"Values\" $root_conf_copy3 }}\n              {{- $_ := set $context.Values.__current_label \"nodeData\" $root_conf_copy4 }}\n\n              {{/* Schedule to the provided label value(s) */}}\n              {{- $label_dict := omit $label_data.label \"NULL\" }}\n              {{- $_ := set $label_dict \"operator\" \"In\" }}\n              {{- $list_aggregate := list $label_dict }}\n              {{- $_ := set $context.Values.__current_label \"matchExpressions\" $list_aggregate }}\n\n              {{/* Do not schedule to other specified labels, with higher\n              precedence as the list position increases. Last defined label\n              is highest priority. 
*/}}\n              {{- $other_labels := without $context.Values.__label_list $label_data }}\n              {{- range $label_data2 := $other_labels }}\n                {{- $label_dict := omit $label_data2.label \"NULL\" }}\n\n                {{- $_ := set $label_dict \"operator\" \"NotIn\" }}\n\n                {{- $list_aggregate := append $context.Values.__current_label.matchExpressions $label_dict }}\n                {{- $_ := set $context.Values.__current_label \"matchExpressions\" $list_aggregate }}\n              {{- end }}\n              {{- $_ := set $context.Values \"__label_list\" $other_labels }}\n\n              {{/* Do not schedule to any other specified hosts */}}\n              {{- range $type, $type_data := $val }}\n                {{- if eq $type \"hosts\" }}\n                  {{- range $host_data := . }}\n                    {{- $label_dict := dict }}\n\n                    {{- $_ := set $label_dict \"key\" \"kubernetes.io/hostname\" }}\n                    {{- $_ := set $label_dict \"operator\" \"NotIn\" }}\n\n                    {{- $values_list := list $host_data.name }}\n                    {{- $_ := set $label_dict \"values\" $values_list }}\n\n                    {{- $list_aggregate := append $context.Values.__current_label.matchExpressions $label_dict }}\n                    {{- $_ := set $context.Values.__current_label \"matchExpressions\" $list_aggregate }}\n                  {{- end }}\n                {{- end }}\n              {{- end }}\n\n              {{/* store completed daemonset entry/info into global list */}}\n              {{- $list_aggregate := append $context.Values.__daemonset_list $context.Values.__current_label }}\n              {{- $_ := set $context.Values \"__daemonset_list\" $list_aggregate }}\n              {{- $_ := unset $context.Values \"__current_label\" }}\n\n            {{- end }}\n          {{- end }}\n        {{- end }}\n\n        {{/* scheduler exceptions for the default daemonset */}}\n        {{- $_ := set 
$context.Values.__default \"matchExpressions\" list }}\n\n        {{- range $type, $type_data := . }}\n          {{/* Do not schedule to other specified labels */}}\n          {{- if eq $type \"labels\" }}\n            {{- range $label_data := . }}\n              {{- $default_dict := omit $label_data.label \"NULL\" }}\n\n              {{- $_ := set $default_dict \"operator\" \"NotIn\" }}\n\n              {{- $list_aggregate := append $context.Values.__default.matchExpressions $default_dict }}\n              {{- $_ := set $context.Values.__default \"matchExpressions\" $list_aggregate }}\n            {{- end }}\n          {{- end }}\n          {{/* Do not schedule to other specified hosts */}}\n          {{- if eq $type \"hosts\" }}\n            {{- range $host_data := . }}\n              {{- $default_dict := dict }}\n\n              {{- $_ := set $default_dict \"key\" \"kubernetes.io/hostname\" }}\n              {{- $_ := set $default_dict \"operator\" \"NotIn\" }}\n\n              {{- $values_list := list $host_data.name }}\n              {{- $_ := set $default_dict \"values\" $values_list }}\n\n              {{- $list_aggregate := append $context.Values.__default.matchExpressions $default_dict }}\n              {{- $_ := set $context.Values.__default \"matchExpressions\" $list_aggregate }}\n            {{- end }}\n          {{- end }}\n        {{- end }}\n      {{- end }}\n    {{- end }}\n  {{- end }}\n\n  {{/* generate the default daemonset */}}\n\n  {{/* set name */}}\n  {{- $_ := set $context.Values.__default \"name\" \"default\" }}\n\n  {{/* no overrides apply, so copy as-is */}}\n  {{- $root_conf_copy1 := omit $context.Values.conf \"overrides\" }}\n  {{- $root_conf_copy2 := dict \"conf\" $root_conf_copy1 }}\n  {{- $context_values := omit $context.Values \"conf\" }}\n  {{- $root_conf_copy3 := mergeOverwrite $context_values $root_conf_copy2 }}\n  {{- $root_conf_copy4 := dict \"Values\" $root_conf_copy3 }}\n  {{- $_ := set $context.Values.__default \"nodeData\" 
$root_conf_copy4 }}\n\n  {{/* add to global list */}}\n  {{- $list_aggregate := append $context.Values.__daemonset_list $context.Values.__default }}\n  {{- $_ := set $context.Values \"__daemonset_list\" $list_aggregate }}\n\n  {{- range $current_dict := $context.Values.__daemonset_list }}\n\n    {{- $context_novalues := omit $context \"Values\" }}\n    {{- $merged_dict := mergeOverwrite $context_novalues $current_dict.nodeData }}\n    {{- $_ := set $current_dict \"nodeData\" $merged_dict }}\n    {{/* Deep copy original daemonset_yaml */}}\n    {{- $_ := set $context.Values \"__daemonset_yaml\" ($daemonset_yaml | toYaml | fromYaml) }}\n\n    {{/* name needs to be a DNS-1123 compliant name. Ensure lower case */}}\n    {{- $name_format1 := printf (print $daemonset_root_name \"-\" $current_dict.name) | lower }}\n    {{/* labels may contain underscores which would be invalid here, so we replace them with dashes\n    there may be other valid label names which would make for an invalid DNS-1123 name\n    but these will be easier to handle in future with sprig regex* functions\n    (not availabile in helm 2.5.1) */}}\n    {{- $name_format2 := $name_format1 | replace \"_\" \"-\" }}\n    {{/* To account for the case where the same label is defined multiple times in overrides\n    (but with different label values), we add a sha of the scheduling data to ensure\n    name uniqueness */}}\n    {{- $_ := set $current_dict \"dns_1123_name\" dict }}\n    {{- if hasKey $current_dict \"matchExpressions\" }}\n      {{- $_ := set $current_dict \"dns_1123_name\" (printf (print $name_format2 \"-\" ($current_dict.matchExpressions | quote | sha256sum | trunc 8))) }}\n    {{- else }}\n      {{- $_ := set $current_dict \"dns_1123_name\" $name_format2 }}\n    {{- end }}\n\n    {{/* set daemonset metadata name */}}\n    {{- if not $context.Values.__daemonset_yaml.metadata }}{{- $_ := set $context.Values.__daemonset_yaml \"metadata\" dict }}{{- end }}\n    {{- if not 
$context.Values.__daemonset_yaml.metadata.name }}{{- $_ := set $context.Values.__daemonset_yaml.metadata \"name\" dict }}{{- end }}\n    {{- $_ := set $context.Values.__daemonset_yaml.metadata \"name\" $current_dict.dns_1123_name }}\n\n    {{/* cross-reference configmap name to container volume definitions */}}\n    {{- $_ := set $context.Values \"__volume_list\" list }}\n    {{- range $current_volume := $context.Values.__daemonset_yaml.spec.template.spec.volumes }}\n      {{- $_ := set $context.Values \"__volume\" $current_volume }}\n      {{- if hasKey $context.Values.__volume \"secret\" }}\n        {{- if eq $context.Values.__volume.secret.secretName $configmap_name }}\n          {{- $_ := set $context.Values.__volume.secret \"secretName\" $current_dict.dns_1123_name }}\n        {{- end }}\n      {{- end }}\n      {{- $updated_list := append $context.Values.__volume_list $context.Values.__volume }}\n      {{- $_ := set $context.Values \"__volume_list\" $updated_list }}\n    {{- end }}\n    {{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec \"volumes\" $context.Values.__volume_list }}\n\n\n    {{/* populate scheduling restrictions */}}\n    {{- if hasKey $current_dict \"matchExpressions\" }}\n      {{- if not $context.Values.__daemonset_yaml.spec.template.spec }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template \"spec\" dict }}{{- end }}\n      {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec \"affinity\" dict }}{{- end }}\n      {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity \"nodeAffinity\" dict }}{{- end }}\n      {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity 
\"requiredDuringSchedulingIgnoredDuringExecution\" dict }}{{- end }}\n      {{- $match_exprs := dict }}\n      {{- $_ := set $match_exprs \"matchExpressions\" $current_dict.matchExpressions }}\n      {{- $appended_match_expr := list $match_exprs }}\n      {{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution \"nodeSelectorTerms\" $appended_match_expr }}\n    {{- end }}\n\n    {{/* input value hash for current set of values overrides */}}\n    {{- if not $context.Values.__daemonset_yaml.spec }}{{- $_ := set $context.Values.__daemonset_yaml \"spec\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.spec.template }}{{- $_ := set $context.Values.__daemonset_yaml.spec \"template\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.spec.template.metadata }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template \"metadata\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.spec.template.metadata.annotations }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata \"annotations\" dict }}{{- end }}\n    {{- $cmap := list $current_dict.dns_1123_name $current_dict.nodeData | include $configmap_include }}\n    {{- $values_hash := $cmap | quote | sha256sum }}\n    {{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata.annotations \"configmap-etc-hash\" $values_hash }}\n\n    {{/* generate configmap */}}\n---\n{{ $cmap }}\n    {{/* generate daemonset yaml */}}\n---\n{{ $context.Values.__daemonset_yaml | toYaml }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_daemonset_overrides_root.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\n\nThe helm-toolkit.utils.daemonset_overrides function have some limitations:\n\n * it allows to override only conf values specifid in configmap-etc\n * it doesn't allow to override values for daemonsets passed via env variables\n   or via damoenset definition. As result it is impossible to have mixed\n   deployment when one compute is configured with dpdk while other not.\n * it is impossible to override interface names/other information stored in\n   <service>-bin configmap\n * It allows to schedule on both hosts and labels, which adds some\n   uncertainty\n\nThis implementation is intended to handle those limitations:\n\n * it allows to schedule only based on labels\n * it creates <service>-bin per daemonset override\n * it allows to override values when rendering daemonsets\n\n It picks data from the following structure:\n\n  .Values:\n    overrides:\n      mychart_mydaemonset:\n        labels:\n          label::value:\n            values:\n              override_root_option: override_root_value\n              conf:\n                ovs_dpdk:\n                  enabled: true\n                neutron:\n                  DEFAULT:\n                    foo: bar\n\n*/}}\n\n{{- define \"helm-toolkit.utils.daemonset_overrides_root\" }}\n  {{- $daemonset := index . 0 }}\n  {{- $daemonSetTemplateName := index . 1 }}\n  {{ $serviceAccountName := index . 2 }}\n  {{- $configmap_include := index . 
3 }}\n  {{- $configmap_name := index . 4 }}\n  {{- $configbin_include := index . 5 }}\n  {{- $configbin_name := index . 6 }}\n  {{- $context := index . 7 }}\n\n  {{- $_ := unset $context \".Files\" }}\n  {{- $daemonset_root_name := printf (print $context.Chart.Name \"_\" $daemonset) }}\n  {{- $_ := set $context.Values \"__daemonset_list\" list }}\n  {{- $_ := set $context.Values \"__default\" dict }}\n\n  {{- $default_enabled := true }}\n  {{- if hasKey $context.Values \"overrides\" }}\n    {{- range $key, $val := $context.Values.overrides }}\n\n      {{- if eq $key $daemonset_root_name }}\n        {{- range $type, $type_data := . }}\n          {{- if eq $type \"overrides_default\" }}\n            {{- $default_enabled = $type_data }}\n          {{- end }}\n\n          {{- if eq $type \"labels\" }}\n            {{- $_ := set $context.Values \"__label_dict\" . }}\n            {{- range $lname, $ldata := . }}\n              {{ $label_name := (split \"::\" $lname)._0 }}\n              {{ $label_value := (split \"::\" $lname)._1 }}\n              {{/* dictionary that will contain all info needed to generate this\n              iteration of the daemonset. 
*/}}\n              {{- $_ := set $context.Values \"__current_label\" dict }}\n\n              {{/* set daemonset name */}}\n              {{- $_ := set $context.Values.__current_label \"name\" $label_name }}\n\n              {{/* set daemonset metadata annotation */}}\n              {{- $_ := set $context.Values.__current_label \"daemonset_override\" $lname  }}\n\n              {{/* apply overrides */}}\n\n\n              {{- $override_root_copy := $ldata.values }}\n              {{/* Deep copy to prevent https://storyboard.openstack.org/#!/story/2005936 */}}\n              {{- $root_copy := omit ($context.Values | toYaml | fromYaml) \"overrides\" }}\n              {{- $merged_dict := mergeOverwrite $root_copy $override_root_copy }}\n\n              {{- $root_conf_copy2 := dict \"values\" $merged_dict }}\n              {{- $context_values := omit (omit ($context.Values | toYaml | fromYaml) \"values\") \"__daemonset_list\" }}\n              {{- $root_conf_copy3 := mergeOverwrite $context_values $root_conf_copy2.values }}\n              {{- $root_conf_copy4 := dict \"Values\" $root_conf_copy3 }}\n              {{- $_ := set $context.Values.__current_label \"nodeData\" $root_conf_copy4 }}\n\n\n              {{/* Schedule to the provided label value(s) */}}\n              {{- $label_dict := dict \"key\" $label_name  }}\n              {{- $_ := set $label_dict \"values\" (list $label_value) }}\n              {{- $_ := set $label_dict \"operator\" \"In\" }}\n              {{- $list_aggregate := list $label_dict }}\n              {{- $_ := set $context.Values.__current_label \"matchExpressions\" $list_aggregate }}\n\n              {{/* Do not schedule to other specified labels, with higher\n              precedence as the list position increases. Last defined label\n              is highest priority. 
*/}}\n              {{- $other_labels :=  omit $context.Values.__label_dict $lname }}\n              {{- range $lname2, $ldata2 := $other_labels }}\n                {{ $label_name2 := (split \"::\" $lname2)._0 }}\n                {{ $label_value2 := (split \"::\" $lname2)._1 }}\n\n                {{- $label_dict := dict \"key\" $label_name2  }}\n                {{- $_ := set $label_dict \"values\" (list $label_value2) }}\n                {{- $_ := set $label_dict \"operator\" \"NotIn\" }}\n\n                {{- $list_aggregate := append $context.Values.__current_label.matchExpressions $label_dict }}\n                {{- $_ := set $context.Values.__current_label \"matchExpressions\" $list_aggregate }}\n              {{- end }}\n\n              {{/* store completed daemonset entry/info into global list */}}\n              {{- $list_aggregate := append $context.Values.__daemonset_list $context.Values.__current_label }}\n              {{- $_ := set $context.Values \"__daemonset_list\" $list_aggregate }}\n              {{- $_ := unset $context.Values \"__current_label\" }}\n\n            {{- end }}\n          {{- end }}\n        {{- end }}\n\n        {{/* scheduler exceptions for the default daemonset */}}\n        {{- $_ := set $context.Values.__default \"matchExpressions\" list }}\n\n        {{- range $type, $type_data := . }}\n          {{/* Do not schedule to other specified labels */}}\n          {{- if eq $type \"labels\" }}\n            {{- range $lname, $ldata := . 
}}\n              {{ $label_name := (split \"::\" $lname)._0 }}\n              {{ $label_value := (split \"::\" $lname)._1 }}\n\n              {{- $default_dict := dict \"key\" $label_name  }}\n              {{- $_ := set $default_dict \"values\" (list $label_value) }}\n              {{- $_ := set $default_dict \"operator\" \"NotIn\" }}\n\n              {{- $list_aggregate := append $context.Values.__default.matchExpressions $default_dict }}\n              {{- $_ := set $context.Values.__default \"matchExpressions\" $list_aggregate }}\n            {{- end }}\n          {{- end }}\n        {{- end }}\n      {{- end }}\n    {{- end }}\n  {{- end }}\n\n  {{/* generate the default daemonset */}}\n\n  {{/* set name */}}\n  {{- $_ := set $context.Values.__default \"name\" \"default\" }}\n\n  {{/* no overrides apply, so copy as-is */}}\n  {{- $root_conf_copy1 := omit $context.Values.conf \"overrides\" }}\n  {{- $root_conf_copy2 := dict \"conf\" $root_conf_copy1 }}\n  {{- $context_values := omit $context.Values \"conf\" }}\n  {{- $root_conf_copy3 := mergeOverwrite $context_values $root_conf_copy2 }}\n  {{- $root_conf_copy4 := dict \"Values\" $root_conf_copy3 }}\n  {{- $_ := set $context.Values.__default \"nodeData\" $root_conf_copy4 }}\n\n  {{/* add to global list */}}\n  {{- if $default_enabled }}\n    {{- $list_aggregate := append $context.Values.__daemonset_list $context.Values.__default }}\n    {{- $_ := set $context.Values \"__daemonset_list\" $list_aggregate }}\n  {{- end }}\n\n  {{- range $current_dict := $context.Values.__daemonset_list }}\n\n    {{- $context_novalues := omit $context \"Values\" }}\n    {{- $merged_dict := mergeOverwrite $context_novalues $current_dict.nodeData }}\n    {{- $_ := set $current_dict \"nodeData\" $merged_dict }}\n    {{/* Deep copy original daemonset_yaml */}}\n    {{- $daemonset_yaml := list $daemonset $configmap_name $serviceAccountName $merged_dict | include $daemonSetTemplateName | toString | fromYaml }}\n    {{- $_ := set 
$context.Values \"__daemonset_yaml\" ($daemonset_yaml | toYaml | fromYaml) }}\n\n    {{/* Use the following name format $daemonset_root_name + sha256summ($current_dict.matchExpressions)\n    as labels might be too long and contain wrong characters like / */}}\n    {{- $_ := set $current_dict \"dns_1123_name\" dict }}\n    {{- $name_format := \"\" }}\n    {{- if eq $current_dict.name \"default\" }}\n       {{- $name_format = (printf \"%s-%s\" $daemonset_root_name \"default\") | replace \"_\" \"-\" }}\n    {{- else }}\n       {{- $name_format = (printf \"%s-%s\" $daemonset_root_name ($current_dict.matchExpressions | quote | sha256sum | trunc 16)) | replace \"_\" \"-\" }}\n    {{- end }}\n    {{- $_ := set $current_dict \"dns_1123_name\" $name_format }}\n\n    {{/* set daemonset metadata name */}}\n    {{- if not $context.Values.__daemonset_yaml.metadata }}{{- $_ := set $context.Values.__daemonset_yaml \"metadata\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.metadata.name }}{{- $_ := set $context.Values.__daemonset_yaml.metadata \"name\" dict }}{{- end }}\n    {{- $_ := set $context.Values.__daemonset_yaml.metadata \"name\" $current_dict.dns_1123_name }}\n\n    {{/* cross-reference configmap name to container volume definitions */}}\n    {{- $_ := set $context.Values \"__volume_list\" list }}\n    {{- range $current_volume := $context.Values.__daemonset_yaml.spec.template.spec.volumes }}\n      {{- $_ := set $context.Values \"__volume\" $current_volume }}\n      {{- if hasKey $context.Values.__volume \"secret\" }}\n        {{- if eq $context.Values.__volume.secret.secretName $configmap_name }}\n          {{- $_ := set $context.Values.__volume.secret \"secretName\" (printf \"%s-etc\" $current_dict.dns_1123_name) }}\n        {{- end }}\n      {{- end }}\n      {{- if hasKey $context.Values.__volume \"configMap\" }}\n        {{- if eq $context.Values.__volume.configMap.name $configbin_name }}\n          {{- $_ := set 
$context.Values.__volume.configMap \"name\" (printf \"%s-bin\" $current_dict.dns_1123_name) }}\n        {{- end }}\n      {{- end }}\n      {{- $updated_list := append $context.Values.__volume_list $context.Values.__volume }}\n      {{- $_ := set $context.Values \"__volume_list\" $updated_list }}\n    {{- end }}\n    {{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec \"volumes\" $context.Values.__volume_list }}\n\n\n    {{/* populate scheduling restrictions */}}\n    {{- if hasKey $current_dict \"matchExpressions\" }}\n      {{- $length := len $current_dict.matchExpressions }}\n      {{- if gt $length 0 }}\n        {{- if not $context.Values.__daemonset_yaml.spec.template.spec }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template \"spec\" dict }}{{- end }}\n        {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec \"affinity\" dict }}{{- end }}\n        {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity \"nodeAffinity\" dict }}{{- end }}\n        {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity \"requiredDuringSchedulingIgnoredDuringExecution\" dict }}{{- end }}\n\n        {{- $expressions_modified := list }}\n        {{- if hasKey $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution \"nodeSelectorTerms\" }}\n          {{- range $orig_expression := $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms }}\n            {{- $match_expressions_modified := list }}\n            {{- $match_expressions_modified = concat 
$match_expressions_modified $current_dict.matchExpressions }}\n            {{- if hasKey $orig_expression \"matchExpressions\" }}\n              {{- $match_expressions_modified = concat $match_expressions_modified $orig_expression.matchExpressions }}\n              {{- $expressions_modified = append $expressions_modified (dict \"matchExpressions\" $match_expressions_modified) }}\n            {{- end }}\n          {{- end }}\n        {{- else }}\n          {{- $expressions_modified = (list (dict \"matchExpressions\" $current_dict.matchExpressions)) }}\n        {{- end }}\n        {{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution \"nodeSelectorTerms\" $expressions_modified }}\n      {{- end }}\n    {{- end }}\n\n    {{/* input value hash for current set of values overrides */}}\n    {{- if not $context.Values.__daemonset_yaml.spec }}{{- $_ := set $context.Values.__daemonset_yaml \"spec\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.spec.template }}{{- $_ := set $context.Values.__daemonset_yaml.spec \"template\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.spec.template.metadata }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template \"metadata\" dict }}{{- end }}\n    {{- if not $context.Values.__daemonset_yaml.spec.template.metadata.annotations }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata \"annotations\" dict }}{{- end }}\n    {{- $cmap := list (printf \"%s-etc\" $current_dict.dns_1123_name) $current_dict.nodeData | include $configmap_include }}\n    {{- $cmap_bin := list (printf \"%s-bin\" $current_dict.dns_1123_name) $current_dict.nodeData | include $configbin_include }}\n    {{- $values_cmap_hash := $cmap | quote | sha256sum }}\n    {{- $values_cmap_bin_hash := $cmap_bin | quote | sha256sum }}\n    {{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata.annotations \"configmap-etc-hash\" 
$values_cmap_hash }}\n    {{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata.annotations \"configmap-bin-hash\" $values_cmap_bin_hash }}\n\n    {{/* Do not set override for default daemonset */}}\n    {{- if $current_dict.daemonset_override }}\n        {{- $_ := set $context.Values.__daemonset_yaml.metadata.annotations \"daemonset_override\" $current_dict.daemonset_override }}\n    {{- end }}\n\n{{/* generate configmap */}}\n---\n{{ $cmap }}\n    {{/* generate <service>-bin yaml */}}\n---\n{{ $cmap_bin }}\n    {{/* generate daemonset yaml */}}\n---\n{{ $context.Values.__daemonset_yaml | toYaml }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_dependency_jobs.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nFilter dependency jobs when the corresponding manifests.job_* flag is disabled.\nThe manifest key is derived from the dependency job name by dropping the chart\nprefix up to the first dash and converting the remainder from kebab-case to\nsnake_case. If no matching manifest key exists, the job is kept.\n*/}}\n{{- define \"helm-toolkit.utils.dependency_jobs_filter\" -}}\n{{- $envAll := index . \"envAll\" -}}\n{{- $deps := index . \"deps\" -}}\n{{- if and $deps (hasKey $deps \"jobs\") $deps.jobs -}}\n{{- if kindIs \"string\" (index $deps.jobs 0) -}}\n{{- $jobs := list -}}\n{{- range $job := $deps.jobs -}}\n{{- $jobParts := splitList \"-\" $job -}}\n{{- if gt (len $jobParts) 1 -}}\n{{- $manifestKey := printf \"job_%s\" (join \"_\" (rest $jobParts)) -}}\n{{- if or (not (hasKey $envAll.Values.manifests $manifestKey)) (index $envAll.Values.manifests $manifestKey) -}}\n{{- $jobs = append $jobs $job -}}\n{{- end -}}\n{{- else -}}\n{{- $jobs = append $jobs $job -}}\n{{- end -}}\n{{- end -}}\n{{- $_ := set $deps \"jobs\" $jobs -}}\n{{- end -}}\n{{- end -}}\n{{ $deps | toYaml }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_dependency_resolver.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.utils.dependency_resolver\" }}\n{{- $envAll := index . \"envAll\" -}}\n{{- $dependencyMixinParam := index . \"dependencyMixinParam\" -}}\n{{- $dependencyKey := index . \"dependencyKey\" -}}\n{{- if $dependencyMixinParam -}}\n{{- $_ := set $envAll.Values \"pod_dependency\" dict -}}\n{{- if kindIs \"string\" $dependencyMixinParam }}\n{{- if ( index $envAll.Values.dependencies.dynamic.targeted $dependencyMixinParam ) }}\n{{- $_ := include \"helm-toolkit.utils.merge\" (tuple $envAll.Values.pod_dependency ( index $envAll.Values.dependencies.static $dependencyKey ) ( index $envAll.Values.dependencies.dynamic.targeted $dependencyMixinParam $dependencyKey ) ) -}}\n{{- else }}\n{{- $_ := set $envAll.Values \"pod_dependency\" ( index $envAll.Values.dependencies.static $dependencyKey ) }}\n{{- end }}\n{{- else if kindIs \"slice\" $dependencyMixinParam }}\n{{- $_ := set $envAll.Values \"__deps\" ( index $envAll.Values.dependencies.static $dependencyKey ) }}\n{{- range $k, $v := $dependencyMixinParam -}}\n{{- if ( index $envAll.Values.dependencies.dynamic.targeted $v ) }}\n{{- $_ := include \"helm-toolkit.utils.merge\" (tuple $envAll.Values.pod_dependency $envAll.Values.__deps ( index $envAll.Values.dependencies.dynamic.targeted $v $dependencyKey ) ) -}}\n{{- $_ := set $envAll.Values \"__deps\" $envAll.Values.pod_dependency -}}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- else -}}\n{{- $_ := set 
$envAll.Values \"pod_dependency\" ( index $envAll.Values.dependencies.static $dependencyKey ) -}}\n{{- end -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_jobs_filter\" (dict \"envAll\" $envAll \"deps\" $envAll.Values.pod_dependency) | toString | fromYaml -}}\n{{ $envAll.Values.pod_dependency | toYaml }}\n{{- end }}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_hash.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.utils.hash\" -}}\n{{- $name := index . 0 -}}\n{{- $context := index . 1 -}}\n{{- $last := base $context.Template.Name }}\n{{- $wtf := $context.Template.Name | replace $last $name -}}\n{{- include $wtf $context | sha256sum | quote -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_hash2.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.utils.hash2\" -}}\n{{- $name := index . 0 -}}\n{{- $context := index . 1 -}}\n{{- $last := base $context.Template.Name }}\n{{- $wtf := $context.Template.Name | replace $last $name -}}\n{{- printf \"%s\" $wtf | quote -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_host_list.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Returns a list of unique hosts for an endpoint, in yaml.\nvalues: |\n  endpoints:\n    cluster_domain_suffix: cluster.local\n    oslo_db:\n      hosts:\n        default: mariadb\n      host_fqdn_override:\n        default: mariadb\nusage: |\n  {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.utils.host_list\" }}\nreturn: |\n  hosts:\n  - mariadb\n  - mariadb.default\n*/}}\n\n{{- define \"helm-toolkit.utils.host_list\" -}}\n{{- $type := index . 0 -}}\n{{- $endpoint := index . 1 -}}\n{{- $context := index . 2 -}}\n{{- $host_fqdn := tuple $type $endpoint $context | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n{{- $host_namespaced := tuple $type $endpoint $context | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $host_short := tuple $type $endpoint $context | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n{{/* It is important that the FQDN host is 1st in this list, to ensure other function can use the 1st element for cert gen CN etc */}}\n{{- $host_list := list $host_fqdn $host_namespaced $host_short | uniq }}\n{{- dict \"hosts\" $host_list | toYaml }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_image_sync_list.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.utils.image_sync_list\" -}}\n{{- $imageExcludeList := .Values.images.local_registry.exclude -}}\n{{- $imageDict := .Values.images.tags -}}\n{{- $local := dict \"first\" true -}}\n{{- range $k, $v := $imageDict -}}\n{{- if not $local.first -}},{{- end -}}\n{{- if (not (has $k $imageExcludeList )) -}}\n{{- index $imageDict $k -}}\n{{- $_ := set $local \"first\" false -}}\n{{- end -}}{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_joinListWithComma.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Joins a list of values into a comma separated string\nvalues: |\n  test:\n    - foo\n    - bar\nusage: |\n  {{ include \"helm-toolkit.utils.joinListWithComma\" .Values.test }}\nreturn: |\n  foo,bar\n*/}}\n\n{{- define \"helm-toolkit.utils.joinListWithComma\" -}}\n{{- $local := dict \"first\" true -}}\n{{- range $k, $v := . -}}{{- if not $local.first -}},{{- end -}}{{- $v -}}{{- $_ := set $local \"first\" false -}}{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_joinListWithCommaAndSingleQuotes.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Joins a list of values into a comma seperated string with single quotes\n  around each value.\nvalues: |\n  test:\n    - foo\n    - bar\nusage: |\n  {{ include \"helm-toolkit.utils.joinListWithCommaAndSingleQuotes\" .Values.test }}\nreturn: |\n  'foo','bar'\n*/}}\n\n{{- define \"helm-toolkit.utils.joinListWithCommaAndSingleQuotes\" -}}\n{{- $local := dict \"first\" true -}}\n{{- range $k, $v := . -}}{{- if not $local.first -}},{{- end -}}'{{- $v -}}'{{- $_ := set $local \"first\" false -}}{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_joinListWithPrefix.tpl",
    "content": "{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Joins a list of prefixed values into a space separated string\nvalues: |\n  test:\n    - foo\n    - bar\nusage: |\n  {{ tuple \"prefix\" .Values.test | include \"helm-toolkit.utils.joinListWithPrefix\" }}\nreturn: |\n  prefixfoo prefixbar\n*/}}\n\n{{- define \"helm-toolkit.utils.joinListWithPrefix\" -}}\n{{- $prefix := index . 0 -}}\n{{- $local := dict \"first\" true -}}\n{{- range $k, $v := index . 1 -}}{{- if not $local.first -}}{{- \" \" -}}{{- end -}}{{- $prefix -}}{{- $v -}}{{- $_ := set $local \"first\" false -}}{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_joinListWithSpace.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Joins a list of values into a space separated string\nvalues: |\n  test:\n    - foo\n    - bar\nusage: |\n  {{ include \"helm-toolkit.utils.joinListWithSpace\" .Values.test }}\nreturn: |\n  foo bar\n*/}}\n\n{{- define \"helm-toolkit.utils.joinListWithSpace\" -}}\n{{- $local := dict \"first\" true -}}\n{{- range $k, $v := . -}}{{- if not $local.first -}}{{- \" \" -}}{{- end -}}{{- $v -}}{{- $_ := set $local \"first\" false -}}{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_merge.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nTakes a tuple of values and merges into the first (target) one each subsequent\n(source) one in order. If all values to merge are maps, then the tuple can be\npassed as is and the target will be the result, otherwise pass a map with a\n\"values\" key containing the tuple of values to merge, and the merge result will\nbe assigned to the \"result\" key of the passed map.\n\nWhen merging maps, for each key in the source, if the target does not define\nthat key, the source value is assigned. If both define the key, then the key\nvalues are merged using this algorithm (recursively) and the result is assigned\nto the target key. Slices are merged by appending them and removing any\nduplicates, and when passing a map to this function and including a\n\"merge_same_named\" key set to true, then map items from the slices with the same\nvalue for the \"name\" key will be merged with each other. 
Any other values are\nmerged by simply keeping the source, and throwing away the target.\n*/}}\n\n{{- define \"helm-toolkit.utils.merge\" -}}\n  {{- $local := dict -}}\n  {{- $_ := set $local \"merge_same_named\" false -}}\n  {{- if kindIs \"map\" $ -}}\n    {{- $_ := set $local \"values\" $.values -}}\n    {{- if hasKey $ \"merge_same_named\" -}}\n      {{- $_ := set $local \"merge_same_named\" $.merge_same_named -}}\n    {{- end -}}\n  {{- else -}}\n    {{- $_ := set $local \"values\" $ -}}\n  {{- end -}}\n\n  {{- $target := first $local.values -}}\n  {{- range $item := rest $local.values -}}\n    {{- $call := dict \"target\" $target \"source\" . \"merge_same_named\" $local.merge_same_named -}}\n    {{- $_ := include \"helm-toolkit.utils._merge\" $call -}}\n    {{- $_ := set $local \"result\" $call.result -}}\n  {{- end -}}\n\n  {{- if kindIs \"map\" $ -}}\n    {{- $_ := set $ \"result\" $local.result -}}\n  {{- end -}}\n{{- end -}}\n\n{{- define \"helm-toolkit.utils._merge\" -}}\n  {{- $local := dict -}}\n\n  {{- $_ := set $ \"result\" $.source -}}\n\n  {{/*\n  TODO: Should we `fail` when trying to merge a collection (map or slice) with\n  either a different kind of collection or a scalar?\n  */}}\n\n  {{- if and (kindIs \"map\" $.target) (kindIs \"map\" $.source) -}}\n    {{- range $key, $sourceValue := $.source -}}\n      {{- if not (hasKey $.target $key) -}}\n        {{- $_ := set $local \"newTargetValue\" $sourceValue -}}\n        {{- if kindIs \"map\" $sourceValue -}}\n          {{- $copy := dict -}}\n          {{- $call := dict \"target\" $copy \"source\" $sourceValue -}}\n          {{- $_ := include \"helm-toolkit.utils._merge.shallow\" $call -}}\n          {{- $_ := set $local \"newTargetValue\" $copy -}}\n        {{- end -}}\n      {{- else -}}\n        {{- $targetValue := index $.target $key -}}\n        {{- $call := dict \"target\" $targetValue \"source\" $sourceValue \"merge_same_named\" $.merge_same_named -}}\n        {{- $_ := include 
\"helm-toolkit.utils._merge\" $call -}}\n        {{- $_ := set $local \"newTargetValue\" $call.result -}}\n      {{- end -}}\n      {{- $_ := set $.target $key $local.newTargetValue -}}\n    {{- end -}}\n    {{- $_ := set $ \"result\" $.target -}}\n  {{- else if and (kindIs \"slice\" $.target) (kindIs \"slice\" $.source) -}}\n    {{- $call := dict \"target\" $.target \"source\" $.source -}}\n    {{- $_ := include \"helm-toolkit.utils._merge.append_slice\" $call -}}\n    {{- if $.merge_same_named -}}\n      {{- $_ := set $local \"result\" list -}}\n      {{- $_ := set $local \"named_items\" dict -}}\n      {{- range $item := $call.result -}}\n      {{- $_ := set $local \"has_name_key\" false -}}\n        {{- if kindIs \"map\" $item -}}\n          {{- if hasKey $item \"name\" -}}\n            {{- $_ := set $local \"has_name_key\" true -}}\n          {{- end -}}\n        {{- end -}}\n\n        {{- if $local.has_name_key -}}\n          {{- if hasKey $local.named_items $item.name -}}\n            {{- $named_item := index $local.named_items $item.name -}}\n            {{- $call := dict \"target\" $named_item \"source\" $item \"merge_same_named\" $.merge_same_named -}}\n            {{- $_ := include \"helm-toolkit.utils._merge\" $call -}}\n          {{- else -}}\n            {{- $copy := dict -}}\n            {{- $copy_call := dict \"target\" $copy \"source\" $item -}}\n            {{- $_ := include \"helm-toolkit.utils._merge.shallow\" $copy_call -}}\n            {{- $_ := set $local.named_items $item.name $copy -}}\n            {{- $_ := set $local \"result\" (append $local.result $copy) -}}\n          {{- end -}}\n        {{- else -}}\n          {{- $_ := set $local \"result\" (append $local.result $item) -}}\n        {{- end -}}\n      {{- end -}}\n    {{- else -}}\n      {{- $_ := set $local \"result\" $call.result -}}\n    {{- end -}}\n    {{- $_ := set $ \"result\" (uniq $local.result) -}}\n  {{- end -}}\n{{- end -}}\n\n{{- define 
\"helm-toolkit.utils._merge.shallow\" -}}\n  {{- range $key, $value := $.source -}}\n    {{- $_ := set $.target $key $value -}}\n  {{- end -}}\n{{- end -}}\n\n{{- define \"helm-toolkit.utils._merge.append_slice\" -}}\n  {{- $local := dict -}}\n  {{- $_ := set $local \"result\" $.target -}}\n  {{- range $value := $.source -}}\n    {{- $_ := set $local \"result\" (append $local.result $value) -}}\n  {{- end -}}\n  {{- $_ := set $ \"result\" $local.result -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_template.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"helm-toolkit.utils.template\" -}}\n{{- $name := index . 0 -}}\n{{- $context := index . 1 -}}\n{{- $last := base $context.Template.Name }}\n{{- $wtf := $context.Template.Name | replace $last $name -}}\n{{ include $wtf $context }}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_to_ini.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Returns INI formatted output from yaml input\nvalues: |\n  conf:\n    paste:\n      filter:debug:\n        use: egg:oslo.middleware#debug\n      filter:request_id:\n        use: egg:oslo.middleware#request_id\n      filter:build_auth_context:\n        use: egg:keystone#build_auth_context\nusage: |\n  {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.paste }}\nreturn: |\n  [filter:build_auth_context]\n  use = egg:keystone#build_auth_context\n  [filter:debug]\n  use = egg:oslo.middleware#debug\n  [filter:request_id]\n  use = egg:oslo.middleware#request_id\n*/}}\n\n{{- define \"helm-toolkit.utils.to_ini\" -}}\n{{- range $section, $values := . -}}\n{{- if kindIs \"map\" $values -}}\n[{{ $section }}]\n{{range $key, $value := $values -}}\n{{- if kindIs \"slice\" $value -}}\n{{ $key }} = {{ include \"helm-toolkit.utils.joinListWithComma\" $value }}\n{{else -}}\n{{ $key }} = {{ $value }}\n{{end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_to_k8s_env_secret_vars.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Returns yaml formatted to be used in k8s templates as container\n  env vars injected via secrets. This requires a secret-<chartname> template to\n  be defined in the chart that can be used to house the desired secret\n  variables. For reference, see the fluentd chart.\nvalues: |\n  test:\n    secrets:\n      foo: bar\n\nusage: |\n  {{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.test }}\nreturn: |\n  - name: foo\n    valueFrom:\n      secretKeyRef:\n        name: \"my-release-name-env-secret\"\n        key: foo\n*/}}\n\n{{- define \"helm-toolkit.utils.to_k8s_env_secret_vars\" -}}\n{{- $context := index . 0 -}}\n{{- $secrets := index . 1 -}}\n{{ range $key, $config := $secrets -}}\n- name: {{ $key }}\n  valueFrom:\n    secretKeyRef:\n      name: {{ printf \"%s-%s\" $context.Release.Name \"env-secret\" | quote }}\n      key: {{ $key }}\n{{ end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_to_k8s_env_vars.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Returns key value pair formatted to be used in k8s templates as container\n  env vars.\nvalues: |\n  test:\n    foo: bar\nusage: |\n  {{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.test }}\nreturn: |\n  - name: foo\n    value: \"bar\"\n*/}}\n\n{{- define \"helm-toolkit.utils.to_k8s_env_vars\" -}}\n{{range $key, $value := . -}}\n{{- if kindIs \"slice\" $value -}}\n- name: {{ $key }}\n  value: {{ include \"helm-toolkit.utils.joinListWithComma\" $value | quote }}\n{{else -}}\n- name: {{ $key }}\n  value: {{ $value | quote }}\n{{ end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_to_kv_list.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Returns key value pair in INI format (key = value)\nvalues: |\n  conf:\n    libvirt:\n      log_level: 3\nusage: |\n  {{ include \"helm-toolkit.utils.to_kv_list\" .Values.conf.libvirt }}\nreturn: |\n  log_level = 3\n*/}}\n\n{{- define \"helm-toolkit.utils.to_kv_list\" -}}\n{{- range $key, $value :=  . -}}\n{{- if kindIs \"slice\" $value }}\n{{ $key }} = {{ include \"helm-toolkit.utils.joinListWithComma\" $value | quote }}\n{{- else if kindIs \"string\" $value }}\n{{- if regexMatch \"^[0-9]+$\" $value }}\n{{ $key }} = {{ $value }}\n{{- else }}\n{{ $key }} = {{ $value | quote }}\n{{- end }}\n{{- else }}\n{{ $key }} = {{ $value }}\n{{- end }}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "helm-toolkit/templates/utils/_to_oslo_conf.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Returns OSLO.conf formatted output from yaml input\nvalues: |\n  conf:\n    keystone:\n      DEFAULT: # Keys at this level are used for section headings\n        max_token_size: 255\n      oslo_messaging_notifications:\n        driver: # An example of a multistring option's syntax\n          type: multistring\n          values:\n            - messagingv2\n            - log\n      oslo_messaging_notifications_stein:\n        driver: # An example of a csv option's syntax\n          type: csv\n          values:\n            - messagingv2\n            - log\n      security_compliance:\n        password_expires_ignore_user_ids:\n        # Values in a list will be converted to a comma separated key\n          - \"123\"\n          - \"456\"\nusage: |\n  {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.keystone }}\nreturn: |\n  [DEFAULT]\n  max_token_size = 255\n  [oslo_messaging_notifications]\n  driver = messagingv2\n  driver = log\n  [oslo_messaging_notifications_stein]\n  driver = messagingv2,log\n  [security_compliance]\n  password_expires_ignore_user_ids = 123,456\n*/}}\n\n{{- define \"helm-toolkit.utils.to_oslo_conf\" -}}\n{{- range $section, $values := . 
-}}\n{{- if kindIs \"map\" $values -}}\n[{{ $section }}]\n{{ range $key, $value := $values -}}\n{{- if kindIs \"slice\" $value -}}\n{{ $key }} = {{ include \"helm-toolkit.utils.joinListWithComma\" $value }}\n{{ else if kindIs \"map\" $value -}}\n{{- if eq $value.type \"multistring\" }}\n{{- range $k, $multistringValue := $value.values -}}\n{{ $key }} = {{ $multistringValue }}\n{{ end -}}\n{{ else if eq $value.type \"csv\" -}}\n{{ $key }} = {{ include \"helm-toolkit.utils.joinListWithComma\" $value.values }}\n{{ end -}}\n{{- else -}}\n{{ $key }} = {{ $value }}\n{{ end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "horizon/.helmignore",
    "content": "values_overrides\n"
  },
  {
    "path": "horizon/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Horizon\nname: horizon\nversion: 2025.2.0\nhome: https://docs.openstack.org/horizon/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Horizon/OpenStack_Project_Horizon_vertical.png\nsources:\n  - https://opendev.org/openstack/horizon\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "horizon/releasenotes/notes/horizon-023da44e7958de05.yaml",
    "content": "---\nfeatures:\n  - |\n    Added support for OpenStack Keystone domain dropdown\n...\n"
  },
  {
    "path": "horizon/releasenotes/notes/horizon-4c5d5e3b58c700a0.yaml",
    "content": "---\nfeatures:\n  - |\n    Added support for customizing the SESSION_ENGINE.\n    This enables deployers to select different Django session storage options,\n    such as database-backed or cache-based sessions, according to their environment needs.\n...\n"
  },
  {
    "path": "horizon/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nSITE_PACKAGES_ROOT=$(python -c \"from sysconfig import get_path; print(get_path('platlib'))\")\nrm -f ${SITE_PACKAGES_ROOT}/openstack_dashboard/local/local_settings.py\nln -s /etc/openstack-dashboard/local_settings ${SITE_PACKAGES_ROOT}/openstack_dashboard/local/local_settings.py\n\nexec /tmp/manage.py migrate --noinput\n"
  },
  {
    "path": "horizon/templates/bin/_django.wsgi.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nNOTE (Portdirect): This file is required to support Horizon regardless of the\nimage used, and to provide PyMySQL support.\n*/}}\n\nimport logging\nimport os\nimport sys\n\nimport pymysql\n\npymysql.version_info = (2, 2, 4, 'final', 0)\npymysql.install_as_MySQLdb()\n\nfrom django.core.wsgi import get_wsgi_application\nfrom django.conf import settings\n\n# Add this file path to sys.path in order to import settings\nsys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '../..'))\nos.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings'\nsys.stdout = sys.stderr\n\nDEBUG = False\n\napplication = get_wsgi_application()\n"
  },
  {
    "path": "horizon/templates/bin/_horizon.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  SITE_PACKAGES_ROOT=$(python -c \"from sysconfig import get_path; print(get_path('platlib'))\")\n  rm -f ${SITE_PACKAGES_ROOT}/openstack_dashboard/local/local_settings.py\n  ln -s /etc/openstack-dashboard/local_settings ${SITE_PACKAGES_ROOT}/openstack_dashboard/local/local_settings.py\n  ln -s  ${SITE_PACKAGES_ROOT}/openstack_dashboard/conf/default_policies  /etc/openstack-dashboard/default_policies\n  {{- range $key, $value := .Values.conf.horizon.local_settings_d }}\n  ln -s /etc/openstack-dashboard/local_settings.d/{{ $key }}.py ${SITE_PACKAGES_ROOT}/openstack_dashboard/local/local_settings.d/{{ $key }}.py\n  {{- end }}\n  {{- range $key, $value := .Values.conf.horizon.custom_panels }}\n  ln -s /etc/openstack-dashboard/custom_panels/{{ $key }}.py ${SITE_PACKAGES_ROOT}/openstack_dashboard/local/enabled/{{ $key }}.py\n  {{- end }}\n  # wsgi/horizon-http needs open files here, including secret_key_store\n  chown -R horizon ${SITE_PACKAGES_ROOT}/openstack_dashboard/local/\n\n  {{- if .Values.conf.software.apache2.a2enmod }}\n    {{- range .Values.conf.software.apache2.a2enmod }}\n  a2enmod {{ . }}\n    {{- end }}\n  {{- end }}\n\n  {{- if .Values.conf.software.apache2.a2dismod }}\n    {{- range .Values.conf.software.apache2.a2dismod }}\n  a2dismod {{ . 
}}\n    {{- end }}\n  {{- end }}\n\n  if [ -f /etc/apache2/envvars ]; then\n     # Loading Apache2 ENV variables\n     source /etc/apache2/envvars\n     # The directory below has to be created due to the fact that\n     # libapache2-mod-wsgi-py3 doesn't create it in contrary by libapache2-mod-wsgi\n     if [ ! -d ${APACHE_RUN_DIR} ]; then\n        mkdir -p ${APACHE_RUN_DIR}\n     fi\n  fi\n  rm -rf /var/run/apache2/*\n  APACHE_DIR=\"apache2\"\n\n  # Add extra panels if available\n  {{- range .Values.conf.horizon.extra_panels }}\n  PANEL_DIR=\"${SITE_PACKAGES_ROOT}/{{ . }}/enabled\"\n  if [ -d ${PANEL_DIR} ];then\n    for panel in `ls -1 ${PANEL_DIR}/_[1-9]*.py`\n    do\n      ln -s ${panel} ${SITE_PACKAGES_ROOT}/openstack_dashboard/local/enabled/$(basename ${panel})\n    done\n  fi\n  unset PANEL_DIR\n  PANEL_DIR=\"${SITE_PACKAGES_ROOT}/{{ . }}/local/enabled\"\n  if [ -d ${PANEL_DIR} ];then\n    for panel in `ls -1 ${PANEL_DIR}/_[1-9]*.py`\n    do\n      ln -s ${panel} ${SITE_PACKAGES_ROOT}/openstack_dashboard/local/enabled/$(basename ${panel})\n    done\n  fi\n  unset PANEL_DIR\n  {{- end }}\n\n  # If the image has support for it, compile the translations\n  if type -p gettext >/dev/null 2>/dev/null; then\n    cd ${SITE_PACKAGES_ROOT}/openstack_dashboard; /tmp/manage.py compilemessages\n    # if there are extra panels and the image has support for it, compile the translations\n    {{- range .Values.conf.horizon.extra_panels }}\n    PANEL_DIR=\"${SITE_PACKAGES_ROOT}/{{ . 
}}\"\n    if [ -d ${PANEL_DIR} ]; then\n      cd ${PANEL_DIR}; /tmp/manage.py compilemessages\n    fi\n    {{- end }}\n    unset PANEL_DIR\n  fi\n\n  # Copy custom logo images\n  {{- if .Values.manifests.configmap_logo }}\n  if [[ -f /tmp/favicon.ico ]]; then\n    cp /tmp/favicon.ico ${SITE_PACKAGES_ROOT}/openstack_dashboard/static/dashboard/img/favicon.ico\n  fi\n  if [[ -f /tmp/logo.svg ]]; then\n    cp /tmp/logo.svg ${SITE_PACKAGES_ROOT}/openstack_dashboard/static/dashboard/img/logo.svg\n  fi\n  if [[ -f /tmp/logo-splash.svg ]]; then\n    cp /tmp/logo-splash.svg ${SITE_PACKAGES_ROOT}/openstack_dashboard/static/dashboard/img/logo-splash.svg\n  fi\n  {{- end }}\n\n  # Compress Horizon's assets.\n  /tmp/manage.py collectstatic --noinput\n  /tmp/manage.py compress --force\n  rm -rf /tmp/_tmp_.secret_key_store.lock /tmp/.secret_key_store\n  chmod +x ${SITE_PACKAGES_ROOT}/django/core/wsgi.py\n  exec {{ .Values.conf.software.apache2.binary }} {{ .Values.conf.software.apache2.start_parameters }}\n}\n\nfunction stop () {\n  {{ .Values.conf.software.apache2.binary }} -k graceful-stop\n}\n\n$COMMAND\n"
  },
  {
    "path": "horizon/templates/bin/_manage.py.tpl",
    "content": "#!/usr/bin/env python\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nNOTE (Portdirect): This file is required to support Horizon regardless of the\nimage used, and to provide PyMySQL support.\n*/}}\n\nimport os\nimport sys\n\nimport pymysql\npymysql.version_info = (2, 2, 4, 'final', 0)\npymysql.install_as_MySQLdb()\n\nfrom django.core.management import execute_from_command_line\n\nif __name__ == \"__main__\":\n    os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\",\n                          \"openstack_dashboard.settings\")\n    execute_from_command_line(sys.argv)\n"
  },
  {
    "path": "horizon/templates/bin/_selenium-test.py.tpl",
    "content": "#!/usr/bin/env python3\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nimport os\nimport sys\nimport logging\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.chrome.options import Options\n{{- if .Values.selenium_v4 }}\nfrom selenium.webdriver.chrome.service import Service\n{{- end }}\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\n\n# Create logger, console handler and formatter\nlogger = logging.getLogger('Horizon Selenium Tests')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\n    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n)\n\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\ndef get_variable(env_var):\n    if env_var in os.environ:\n        logger.info('Found \"{}\"'.format(env_var))\n        return os.environ[env_var]\n    else:\n        logger.critical('Variable \"{}\" is not defined!'.format(env_var))\n        sys.exit(1)\n\n\nkeystone_user = get_variable('OS_USERNAME')\nkeystone_password = get_variable('OS_PASSWORD')\nhorizon_uri = get_variable('HORIZON_URI')\nuser_domain_name = get_variable('OS_USER_DOMAIN_NAME')\n\n# Add options to make chrome browser headless\noptions = 
Options()\noptions.add_argument('--headless')\noptions.add_argument('--no-sandbox')\nchrome_driver = '/etc/selenium/chromedriver'\n{{- if .Values.selenium_v4 }}\nservice = Service(executable_path=chrome_driver)\nbrowser = webdriver.Chrome(service=service, options=options)\n{{- else }}\nbrowser = webdriver.Chrome(chrome_driver, chrome_options=options)\n{{- end }}\n\ntry:\n    logger.info('Attempting to connect to Horizon')\n    browser.get(horizon_uri)\n    el = WebDriverWait(browser, 15).until(\n        EC.title_contains('OpenStack Dashboard')\n    )\n    logger.info('Connected to Horizon')\nexcept TimeoutException:\n    logger.critical('Timed out waiting for Horizon')\n    browser.quit()\n    sys.exit(1)\n\ntry:\n    logger.info('Attempting to log into Horizon')\n{{- if .Values.selenium_v4 }}\n    browser.find_element(By.NAME, 'domain').send_keys(user_domain_name)\n    browser.find_element(By.NAME, 'username').send_keys(keystone_user)\n    browser.find_element(By.NAME, 'password').send_keys(keystone_password)\n    browser.find_element(By.ID, 'loginBtn').click()\n{{- else }}\n    browser.find_element_by_name('domain').send_keys(user_domain_name)\n    browser.find_element_by_name('username').send_keys(keystone_user)\n    browser.find_element_by_name('password').send_keys(keystone_password)\n    browser.find_element_by_id('loginBtn').click()\n{{- end }}\n    WebDriverWait(browser, 15).until(\n        EC.presence_of_element_located((By.ID, 'navbar-collapse'))\n    )\n    logger.info(\"Successfully logged into Horizon\")\nexcept (TimeoutException, NoSuchElementException):\n    logger.error('Failed to login to Horizon')\n    browser.quit()\n    sys.exit(1)\n\nbrowser.quit()\n"
  },
  {
    "path": "horizon/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.certificates -}}\n{{ dict \"envAll\" . \"service\" \"dashboard\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "horizon/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: horizon-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  horizon.sh: |\n{{ tuple \"bin/_horizon.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  manage.py: |\n{{ tuple \"bin/_manage.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  django.wsgi: |\n{{ tuple \"bin/_django.wsgi.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  selenium-test.py: |\n{{ tuple \"bin/_selenium-test.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: horizon-etc\ntype: Opaque\ndata:\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.horizon.apache \"key\" \"horizon.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.horizon.local_settings.template \"key\" \"local_settings\" \"format\" \"Secret\" ) | indent 2 }}\n{{- if .Values.conf.horizon.security }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.horizon.security \"key\" \"security.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end }}\n{{- range $key, $value := .Values.conf.horizon.custom_panels }}\n  {{ printf \"%s.py\" $key }}: {{ $value | b64enc }}\n{{- end }}\n{{- range $key, $value := .Values.conf.horizon.policy }}\n  {{ printf \"%s_policy.json\" $key }}: {{ $value | toPrettyJson | b64enc }}\n{{- end }}\n{{- range $key, $value := .Values.conf.horizon.policy }}\n  {{ printf \"%s_policy.yaml\" $key }}: {{ toYaml $value | b64enc }}\n{{- end }}\n{{- range $key, $value := .Values.conf.horizon.local_settings_d }}\n  {{ printf \"%s.py\" $key }}: {{ $value | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/configmap-logo.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_logo }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: horizon-logo\nbinaryData:\n{{- if .Values.conf.horizon.branding.favicon }}\n  favicon.ico: |\n    {{- .Values.conf.horizon.branding.favicon | nindent 4 }}\n{{- end }}\n{{- if .Values.conf.horizon.branding.logo }}\n  logo.svg: |\n    {{- .Values.conf.horizon.branding.logo | nindent 4 }}\n{{- end }}\n{{- if .Values.conf.horizon.branding.logo_splash }}\n  logo-splash.svg: |\n    {{- .Values.conf.horizon.branding.logo_splash | nindent 4 }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment }}\n{{- $envAll := . }}\n\n{{- $mounts_horizon := .Values.pod.mounts.horizon.horizon }}\n{{- $mounts_horizon_init := .Values.pod.mounts.horizon.init_container }}\n\n{{- $serviceAccountName := \"horizon\" }}\n{{ tuple $envAll \"dashboard\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: horizon\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"horizon\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.server }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"horizon\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"horizon\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"horizon\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{- if .Values.manifests.configmap_logo }}\n        configmap-logo-hash: {{ tuple \"configmap-logo.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{- end }}\n{{ dict \"envAll\" $envAll \"podName\" \"horizon\" \"containerNames\" (list \"horizon\" \"init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"horizon\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"horizon\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"horizon\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"horizon\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.dashboard.node_selector_key }}: {{ .Values.labels.dashboard.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.horizon.enabled }}\n{{ tuple $envAll \"horizon\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.horizon.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"dashboard\" $mounts_horizon_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: horizon\n{{ tuple $envAll \"horizon\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"horizon\" \"container\" \"horizon\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/horizon.sh\n            - start\n          env:\n          - name: MY_POD_IP\n            valueFrom:\n              fieldRef:\n                fieldPath: status.podIP\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n          - name: REQUESTS_CA_BUNDLE\n            value: \"/etc/openstack-dashboard/certs/ca.crt\"\n{{- end }}\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/horizon.sh\n                  - stop\n          ports:\n            - name: web\n              containerPort: {{ tuple \"dashboard\" \"internal\" \"web\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            httpGet:\n              scheme: {{ tuple \"dashboard\" \"internal\" \"web\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n              path: /\n              port: {{ tuple \"dashboard\" \"internal\" \"web\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 15\n            periodSeconds: 10\n            timeoutSeconds: 5\n          livenessProbe:\n            httpGet:\n              scheme: {{ tuple \"dashboard\" \"internal\" \"web\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n              path: /\n              port: {{ tuple \"dashboard\" \"internal\" \"web\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 180\n            periodSeconds: 60\n            timeoutSeconds: 5\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: static-horizon\n              mountPath: /var/www/html/\n            - name: horizon-bin\n              mountPath: /tmp/horizon.sh\n              subPath: horizon.sh\n              readOnly: true\n            - name: horizon-bin\n              mountPath: /tmp/manage.py\n              subPath: manage.py\n              readOnly: true\n            - name: horizon-etc\n              mountPath: {{ .Values.conf.software.apache2.site_dir }}/000-default.conf\n              subPath: horizon.conf\n              readOnly: true\n            {{- if .Values.conf.horizon.security }}\n            - name: horizon-etc\n              mountPath: {{ .Values.conf.software.apache2.conf_dir }}/security.conf\n              subPath: security.conf\n              readOnly: true\n            {{- end }}\n            - name: horizon-bin\n              mountPath: /var/www/cgi-bin/horizon/django.wsgi\n              subPath: django.wsgi\n              readOnly: true\n            - name: horizon-etc\n              mountPath: /etc/openstack-dashboard/local_settings\n              subPath: local_settings\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.horizon.custom_panels }}\n            {{- $customPanelsFile := printf \"/etc/openstack-dashboard/custom_panels/%s.py\" $key }}\n            - name: horizon-etc\n              mountPath: {{ $customPanelsFile }}\n              subPath: {{ base $customPanelsFile }}\n              readOnly: true\n            {{- end }}\n            {{- range $key, $value := $envAll.Values.conf.horizon.policy }}\n            {{- $policyYamlFile := printf \"/etc/openstack-dashboard/%s_policy.yaml\" $key }}\n            - name: horizon-etc\n              mountPath: {{ 
$policyYamlFile }}\n              subPath: {{ base $policyYamlFile }}\n              readOnly: true\n            {{- end }}\n            {{- range $key, $value := $envAll.Values.conf.horizon.policy }}\n            {{- $policyJsonFile := printf \"/etc/openstack-dashboard/%s_policy.json\" $key }}\n            - name: horizon-etc\n              mountPath: {{ $policyJsonFile }}\n              subPath: {{ base $policyJsonFile }}\n              readOnly: true\n            {{- end }}\n            {{- range $key, $value := $envAll.Values.conf.horizon.local_settings_d }}\n            {{- $localSettingsFile := printf \"/etc/openstack-dashboard/local_settings.d/%s.py\" $key }}\n            - name: horizon-etc\n              mountPath: {{ $localSettingsFile }}\n              subPath: {{ base $localSettingsFile }}\n              readOnly: true\n            {{- end }}\n            {{- if .Values.manifests.configmap_logo }}\n            - name: horizon-logo\n              mountPath: /tmp/logo.svg\n              subPath: logo.svg\n            - name: horizon-logo\n              mountPath: /tmp/logo-splash.svg\n              subPath: logo-splash.svg\n            - name: horizon-logo\n              mountPath: /tmp/favicon.ico\n              subPath: favicon.ico\n            {{- end }}\n{{- dict \"enabled\" (or $envAll.Values.manifests.certificates $envAll.Values.tls.identity) \"name\" $envAll.Values.secrets.tls.dashboard.dashboard.internal \"path\" \"/etc/openstack-dashboard/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_horizon.volumeMounts }}{{ toYaml $mounts_horizon.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: wsgi-horizon\n          
emptyDir: {}\n        - name: static-horizon\n          emptyDir: {}\n        - name: horizon-bin\n          configMap:\n            name: horizon-bin\n            defaultMode: 0555\n        - name: horizon-etc\n          secret:\n            secretName: horizon-etc\n            defaultMode: 0444\n        {{- if .Values.manifests.configmap_logo }}\n        - name: horizon-logo\n          configMap:\n            name: horizon-logo\n        {{- end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or $envAll.Values.manifests.certificates $envAll.Values.tls.identity) \"name\" $envAll.Values.secrets.tls.dashboard.dashboard.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_horizon.volumes }}{{ toYaml $mounts_horizon.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "horizon/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.dashboard.ingress.public }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"dashboard\" \"backendServiceType\" \"dashboard\" \"backendPort\" \"web\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.dashboard.dashboard.internal -}}\n{{- if and .Values.manifests.certificates $secretName -}}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.dashboard.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbToDrop := dict \"inputType\" \"secret\" \"adminSecret\" .Values.secrets.oslo_db.admin \"userSecret\" .Values.secrets.oslo_db.horizon -}}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"horizon\" \"dbToDrop\" $dbToDrop -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbDropJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.horizon.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbToInit := dict \"inputType\" \"secret\" \"adminSecret\" .Values.secrets.oslo_db.admin \"userSecret\" .Values.secrets.oslo_db.horizon -}}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"horizon\" \"dbToInit\" $dbToInit -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.horizon.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $envAll := . }}\n\n{{- $mounts_horizon_db_sync := .Values.pod.mounts.horizon_db_sync.horizon_db_sync }}\n{{- $mounts_horizon_db_sync_init := .Values.pod.mounts.horizon_db_sync.init_container }}\n\n{{- $serviceAccountName := \"horizon-db-sync\" }}\n{{ tuple $envAll \"db_sync\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: horizon-db-sync\n  labels:\n{{ tuple $envAll \"horizon\" \"db-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    helm.sh/hook: post-install,post-upgrade\n    helm.sh/hook-weight: \"-4\"\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 }}\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"horizon\" \"db-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"horizon-db-sync\" \"containerNames\" (list \"horizon-db-sync\" \"init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"db_sync\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"db_sync\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"db_sync\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.horizon.enabled }}\n{{ tuple $envAll \"horizon\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"db_sync\" $mounts_horizon_db_sync_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: horizon-db-sync\n{{ tuple $envAll \"horizon_db_sync\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_sync | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"db_sync\" \"container\" \"horizon_db_sync\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/db-sync.sh\n          volumeMounts:\n          - name: horizon-etc\n            mountPath: /etc/openstack-dashboard/local_settings\n            subPath: local_settings\n            readOnly: true\n          - name: horizon-bin\n            mountPath: /tmp/db-sync.sh\n            subPath: db-sync.sh\n            readOnly: true\n          - name: horizon-bin\n            mountPath: /tmp/manage.py\n            subPath: manage.py\n            readOnly: true\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include 
\"helm-toolkit.snippets.tls_volume_mount\" | indent 10 }}\n{{ if $mounts_horizon_db_sync.volumeMounts }}{{ toYaml $mounts_horizon_db_sync.volumeMounts | indent 10 }}{{ end }}\n      volumes:\n      - name: horizon-etc\n        secret:\n          secretName: horizon-etc\n          defaultMode: 0444\n      - name: horizon-bin\n        configMap:\n          name: horizon-bin\n          defaultMode: 0555\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 6 }}\n{{ if $mounts_horizon_db_sync.volumes }}{{ toYaml $mounts_horizon_db_sync.volumes | indent 6 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"horizon\" -}}\n{{- if .Values.pod.tolerations.horizon.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/network_policy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"horizon\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "horizon/templates/pdb.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: horizon\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.horizon.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"horizon\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/pod-helm-tests.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pod_helm_test }}\n{{- $envAll := . }}\n\n{{- $mounts_tests := .Values.pod.mounts.horizon_tests.horizon_tests }}\n{{- $mounts_tests_init := .Values.pod.mounts.horizon_tests.init_container }}\n\n{{- $serviceAccountName := print $envAll.Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ print $envAll.Release.Name \"-test\" }}\n  labels:\n{{ tuple $envAll \"horizon\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"horizon-test\" \"containerNames\" (list \"init\" \"horizon-test\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n{{ dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  restartPolicy: Never\n{{ tuple \"horizon_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 2 }}\n{{ tuple \"horizon_tests\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.horizon.enabled }}\n{{ tuple $envAll \"horizon\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: horizon-test\n{{ tuple $envAll \"test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"horizon_test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      command:\n        - /tmp/selenium-test.py\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: HORIZON_URI\n          value: {{ tuple \"dashboard\" \"public\" \"web\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: horizon-bin\n          mountPath: /tmp/selenium-test.py\n          subPath: selenium-test.py\n          readOnly: true\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" .Values.secrets.tls.dashboard.dashboard.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 8 }}\n{{ if $mounts_tests.volumeMounts }}{{ toYaml $mounts_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: horizon-bin\n      configMap:\n        name: horizon-bin\n        defaultMode: 0555\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" .Values.secrets.tls.dashboard.dashboard.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{ if $mounts_tests.volumes }}{{ toYaml $mounts_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"horizon\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendService\" \"dashboard\" \"backendServiceType\" \"dashboard\" ) }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- $secretName := index $envAll.Values.secrets.identity.admin }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" \"admin\" $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple \"admin\" \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/service-ingress.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress .Values.network.dashboard.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"dashboard\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "horizon/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"dashboard\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    {{ if .Values.network.node_port.enabled }}\n    - name: web\n      protocol: TCP\n      nodePort: {{ .Values.network.node_port.port }}\n      port: {{ tuple \"dashboard\" \"internal\" \"web\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      targetPort: {{ tuple \"dashboard\" \"internal\" \"web\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ else }}\n    - name: web\n      protocol: TCP\n      port: {{ tuple \"dashboard\" \"internal\" \"web\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      targetPort: {{ tuple \"dashboard\" \"internal\" \"web\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"horizon\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "horizon/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for horizon.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    horizon_db_sync: quay.io/airshipit/horizon:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    horizon: quay.io/airshipit/horizon:2025.1-ubuntu_noble\n    test: quay.io/airshipit/osh-selenium:latest-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\n# Use selenium v4 syntax\nselenium_v4: true\n\nrelease_group: null\n\nlabels:\n  dashboard:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nnetwork:\n  dashboard:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        nginx.ingress.kubernetes.io/proxy-body-size: \"0\"\n        
haproxy.org/path-rewrite: /\n  external_policy_local: false\n  node_port:\n    enabled: false\n    port: 31000\n\nconf:\n  software:\n    apache2:\n      binary: apache2\n      start_parameters: -DFOREGROUND\n      site_dir: /etc/apache2/sites-available\n      conf_dir: /etc/apache2/conf-available\n      mods_dir: /etc/apache2/mods-available\n      a2enmod:\n        - headers\n        - rewrite\n      a2dismod:\n        - status\n  horizon:\n    branding:\n      # favicon must be a base64 encoded .ico string\n      # logo and logo_splash must be base64 encoded .svg string\n      logo:\n      logo_splash:\n      favicon:\n    apache: |\n      LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" combined\n      LogFormat \"%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" proxy\n\n      SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n      CustomLog /dev/stdout combined env=!forwarded\n      CustomLog /dev/stdout proxy env=forwarded\n\n      <VirtualHost *:{{ tuple \"dashboard\" \"internal\" \"web\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}>\n          WSGIApplicationGroup %{GLOBAL}\n          WSGIScriptReloading On\n          WSGIDaemonProcess horizon-http processes=5 threads=1 user=horizon group=horizon display-name=%{GROUP}\n          WSGIProcessGroup horizon-http\n          WSGIScriptAlias / /var/www/cgi-bin/horizon/django.wsgi\n          WSGIPassAuthorization On\n          RewriteEngine on\n          RewriteCond %{REQUEST_METHOD} !^(POST|PUT|GET|DELETE|PATCH)\n          RewriteRule .* - [F]\n\n          <Location \"/\">\n              Require all granted\n          </Location>\n\n          Alias /static /var/www/html/horizon\n          <Location \"/static\">\n              SetHandler static\n          </Location>\n\n          ErrorLogFormat \"%{cu}t %M\"\n          ErrorLog /dev/stdout\n          TransferLog /dev/stdout\n\n          SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n          CustomLog /dev/stdout combined env=!forwarded\n          CustomLog /dev/stdout proxy env=forwarded\n      </Virtualhost>\n    security: |\n      #\n      # Disable access to the entire file system except for the directories that\n      # are explicitly allowed later.\n      #\n      # This currently breaks the configurations that come with some web application\n      # Debian packages.\n      #\n      #<Directory />\n      #   AllowOverride None\n      #   Require all denied\n      #</Directory>\n\n      # Changing the following options will not really affect the security of the\n      # server, but might make attacks slightly more difficult in some cases.\n\n      #\n      # ServerTokens\n      # This directive configures what you return as the Server HTTP response\n      # Header. 
The default is 'Full' which sends information about the OS-Type\n      # and compiled in modules.\n      # Set to one of:  Full | OS | Minimal | Minor | Major | Prod\n      # where Full conveys the most information, and Prod the least.\n      ServerTokens Prod\n\n      #\n      # Optionally add a line containing the server version and virtual host\n      # name to server-generated pages (internal error documents, FTP directory\n      # listings, mod_status and mod_info output etc., but not CGI generated\n      # documents or custom error documents).\n      # Set to \"EMail\" to also include a mailto: link to the ServerAdmin.\n      # Set to one of:  On | Off | EMail\n      ServerSignature Off\n\n      #\n      # Allow TRACE method\n      #\n      # Set to \"extended\" to also reflect the request body (only for testing and\n      # diagnostic purposes).\n      #\n      # Set to one of:  On | Off | extended\n      TraceEnable Off\n\n      #\n      # Forbid access to version control directories\n      #\n      # If you use version control systems in your document root, you should\n      # probably deny access to their directories. For example, for subversion:\n      #\n      #<DirectoryMatch \"/\\.svn\">\n      #   Require all denied\n      #</DirectoryMatch>\n\n      #Security-Settings\n      # Setting this header will prevent MSIE from interpreting files as something\n      # else than declared by the content type in the HTTP headers.\n      # Requires mod_headers to be enabled.\n      #\n      Header set X-Content-Type-Options: \"nosniff\"\n      Header set X-Permitted-Cross-Domain-Policies: \"none\"\n      # Setting this header will prevent other sites from embedding pages from this\n      # site as frames. 
This defends against clickjacking attacks.\n      # Requires mod_headers to be enabled.\n      #\n    custom_panels: {}\n      ## For example, _5000_disable_project_vg_snapshots.py\n      # _5000_disable_project_vg_snapshots: |\n      #   PANEL = 'vg_snapshots'\n      #   PANEL_DASHBOARD = 'project'\n      #   PANEL_GROUP = 'volumes'\n      #   REMOVE_PANEL = True\n      ## https://docs.openstack.org/horizon/latest/configuration/pluggable_panels.html#id2\n    local_settings_d: {}\n    local_settings:\n      config:\n        # Use \"True\" and \"False\" as Titlecase strings with quotes, boolean\n        # values will not work\n        horizon_secret_key: 9aee62c0-5253-4a86-b189-e0fb71fa503c\n        debug: \"False\"\n        use_ssl: \"False\"\n        endpoint_type: \"internalURL\"\n        keystone_multidomain_support: \"True\"\n        keystone_multidomain_dropdown: \"False\"\n        keystone_domains:\n          Default: \"Default\"\n          example_domain: \"example\"\n        keystone_default_domain: Default\n        disable_password_reveal: \"True\"\n        show_openrc_file: \"True\"\n        csrf_cookie_secure: \"False\"\n        csrf_cookie_httponly: \"False\"\n        csrf_trusted_origins: []\n        enforce_password_check: \"True\"\n        # Set enable_pwd_validator to true to enforce password validator settings.\n        enable_pwd_validator: false\n        pwd_validator_regex: '(?=.*[a-zA-Z])(?=.*\\d).{8,}|(?=.*\\d)(?=.*\\W).{8,}|(?=.*\\W)(?=.*[a-zA-Z]).{8,}'\n        pwd_validator_help_text: '_(\"Your password must be at least eight (8) characters in length and must include characters from at least two (2) of these groupings: alpha, numeric, and special characters.\")'\n        session_cookie_secure: \"False\"\n        session_cookie_httponly: \"False\"\n        session_engine: 'django.contrib.sessions.backends.cache'\n        cache_backend: 'django.core.cache.backends.memcached.PyMemcacheCache'\n        secure_proxy_ssl_header: false\n        
password_autocomplete: \"False\"\n        disallow_iframe_embed: \"False\"\n        ssl_no_verify: \"True\"\n        allowed_hosts:\n          - '*'\n        horizon_images_upload_mode: 'legacy'\n        openstack_cinder_features:\n          enable_backup: \"True\"\n        openstack_neutron_network:\n          enable_router: \"True\"\n          enable_quotas: \"True\"\n          enable_ipv6: \"True\"\n          enable_distributed_router: \"False\"\n          enable_ha_router: \"False\"\n          enable_lb: \"True\"\n          enable_firewall: \"True\"\n          enable_vpn: \"True\"\n          enable_fip_topology_check: \"True\"\n        openstack_enable_password_retrieve: \"False\"\n        auth:\n          sso:\n            enabled: False\n            initial_choice: \"credentials\"\n          idp_mapping:\n            - name: \"acme_oidc\"\n              label: \"Acme Corporation - OpenID Connect\"\n              idp: \"myidp1\"\n              protocol: \"oidc\"\n            - name: \"acme_saml2\"\n              label: \"Acme Corporation - SAML2\"\n              idp: \"myidp2\"\n              protocol: \"saml2\"\n        log_level: \"DEBUG\"\n        # Pass any settings to the end of local_settings.py\n        raw: {}\n        openstack_api_versions:\n          container_infra: \"1.10\"\n      template: |\n        import os\n\n        from django.utils.translation import gettext_lazy as _\n\n        from openstack_dashboard import exceptions\n\n        DEBUG = {{ .Values.conf.horizon.local_settings.config.debug }}\n        TEMPLATE_DEBUG = DEBUG\n\n        COMPRESS_OFFLINE = True\n        COMPRESS_CSS_HASHING_METHOD = \"hash\"\n\n        # WEBROOT is the location relative to Webserver root\n        # should end with a slash.\n        WEBROOT = '/'\n        # LOGIN_URL = WEBROOT + 'auth/login/'\n        # LOGOUT_URL = WEBROOT + 'auth/logout/'\n        #\n        # LOGIN_REDIRECT_URL can be used as an alternative for\n        # HORIZON_CONFIG.user_home, if 
user_home is not set.\n        # Do not set it to '/home/', as this will cause circular redirect loop\n        # LOGIN_REDIRECT_URL = WEBROOT\n\n        # Required for Django 1.5.\n        # If horizon is running in production (DEBUG is False), set this\n        # with the list of host/domain names that the application can serve.\n        # For more information see:\n        # https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts\n        ALLOWED_HOSTS = [{{ include \"helm-toolkit.utils.joinListWithCommaAndSingleQuotes\" .Values.conf.horizon.local_settings.config.allowed_hosts }},'%s' % (os.environ.get(\"MY_POD_IP\"))]\n\n        # Set SSL proxy settings:\n        # For Django 1.4+ pass this header from the proxy after terminating the SSL,\n        # and don't forget to strip it from the client's request.\n        # For more information see:\n        # https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header\n        #SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')\n        # https://docs.djangoproject.com/en/1.5/ref/settings/#secure-proxy-ssl-header\n        {{- if .Values.conf.horizon.local_settings.config.secure_proxy_ssl_header }}\n        SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n        {{- end }}\n\n        # If Horizon is being served through SSL, then uncomment the following two\n        # settings to better secure the cookies from security exploits\n        USE_SSL = {{ .Values.conf.horizon.local_settings.config.use_ssl }}\n        CSRF_COOKIE_SECURE = {{ .Values.conf.horizon.local_settings.config.csrf_cookie_secure }}\n        CSRF_COOKIE_HTTPONLY = {{ .Values.conf.horizon.local_settings.config.csrf_cookie_httponly }}\n        SESSION_COOKIE_SECURE = {{ .Values.conf.horizon.local_settings.config.session_cookie_secure }}\n\n        SESSION_COOKIE_HTTPONLY = {{ .Values.conf.horizon.local_settings.config.session_cookie_httponly }}\n\n        # 
https://docs.djangoproject.com/en/dev/ref/settings/#csrf-trusted-origins\n        CSRF_TRUSTED_ORIGINS = [{{ include \"helm-toolkit.utils.joinListWithCommaAndSingleQuotes\" .Values.conf.horizon.local_settings.config.csrf_trusted_origins }}]\n\n        # Overrides for OpenStack API versions. Use this setting to force the\n        # OpenStack dashboard to use a specific API version for a given service API.\n        # Versions specified here should be integers or floats, not strings.\n        # NOTE: The version should be formatted as it appears in the URL for the\n        # service API. For example, The identity service APIs have inconsistent\n        # use of the decimal point, so valid options would be 2.0 or 3.\n        #OPENSTACK_API_VERSIONS = {\n        #    \"data-processing\": 1.1,\n        #    \"identity\": 3,\n        #    \"volume\": 2,\n        #}\n\n        OPENSTACK_API_VERSIONS = {\n            \"identity\": 3,\n            \"container-infra\": \"{{ .Values.conf.horizon.local_settings.config.openstack_api_versions.container_infra }}\"\n        }\n\n        # Set this to True if running on multi-domain model. 
When this is enabled, it\n        # will require user to enter the Domain name in addition to username for login.\n        OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = {{ .Values.conf.horizon.local_settings.config.keystone_multidomain_support }}\n        OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN = {{ .Values.conf.horizon.local_settings.config.keystone_multidomain_dropdown }}\n        {{- $multiDomainDropdown := eq (lower .Values.conf.horizon.local_settings.config.keystone_multidomain_dropdown) \"true\" }}\n        {{- if $multiDomainDropdown }}\n        OPENSTACK_KEYSTONE_DOMAIN_CHOICES = (\n        {{- range $key, $label := .Values.conf.horizon.local_settings.config.keystone_domains }}\n            ('{{ $key }}', _('{{ $label }}')),\n        {{- end }}\n        )\n        {{- end }}\n\n        # Overrides the default domain used when running on single-domain model\n        # with Keystone V3. All entities will be created in the default domain.\n        OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = '{{ .Values.conf.horizon.local_settings.config.keystone_default_domain }}'\n\n        # Set Console type:\n        # valid options are \"AUTO\"(default), \"VNC\", \"SPICE\", \"RDP\", \"SERIAL\" or None\n        # Set to None explicitly if you want to deactivate the console.\n        #CONSOLE_TYPE = \"AUTO\"\n\n        # Default OpenStack Dashboard configuration.\n        HORIZON_CONFIG = {\n            'user_home': 'openstack_dashboard.views.get_user_home',\n            'ajax_queue_limit': 10,\n            'auto_fade_alerts': {\n                'delay': 3000,\n                'fade_duration': 1500,\n                'types': ['alert-success', 'alert-info']\n            },\n            'help_url': \"http://docs.openstack.org\",\n            'exceptions': {'recoverable': exceptions.RECOVERABLE,\n                           'not_found': exceptions.NOT_FOUND,\n                           'unauthorized': exceptions.UNAUTHORIZED},\n            'modal_backdrop': 'static',\n            
'angular_modules': [],\n            'js_files': [],\n            'js_spec_files': [],\n        }\n\n        {{- if .Values.conf.horizon.local_settings.config.enable_pwd_validator }}\n        # Specify a regular expression to validate user passwords.\n        HORIZON_CONFIG[\"password_validator\"] = {\n            \"regex\": '{{ .Values.conf.horizon.local_settings.config.pwd_validator_regex }}',\n            \"help_text\": {{ .Values.conf.horizon.local_settings.config.pwd_validator_help_text }},\n        }\n        {{- end }}\n\n        # Disable simplified floating IP address management for deployments with\n        # multiple floating IP pools or complex network requirements.\n        #HORIZON_CONFIG[\"simple_ip_management\"] = False\n\n        # Turn off browser autocompletion for forms including the login form and\n        # the database creation workflow if so desired.\n        HORIZON_CONFIG[\"password_autocomplete\"] = '{{ .Values.conf.horizon.local_settings.config.password_autocomplete }}'\n\n        # Setting this to True will disable the reveal button for password fields,\n        # including on the login form.\n        HORIZON_CONFIG[\"disable_password_reveal\"] = {{ .Values.conf.horizon.local_settings.config.disable_password_reveal }}\n\n        LOCAL_PATH = '/tmp'\n\n        # Set custom secret key:\n        # You can either set it to a specific value or you can let horizon generate a\n        # default secret key that is unique on this machine, e.i. regardless of the\n        # amount of Python WSGI workers (if used behind Apache+mod_wsgi): However,\n        # there may be situations where you would want to set this explicitly, e.g.\n        # when multiple dashboard instances are distributed on different machines\n        # (usually behind a load-balancer). 
Either you have to make sure that a session\n        # gets all requests routed to the same dashboard instance or you set the same\n        # SECRET_KEY for all of them.\n        SECRET_KEY='{{ .Values.conf.horizon.local_settings.config.horizon_secret_key }}'\n\n        CACHES = {\n            'default': {\n                'BACKEND': '{{ .Values.conf.horizon.local_settings.config.cache_backend }}',\n                'LOCATION': '{{ tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}',\n            }\n        }\n        DATABASES = {\n            'default': {\n                # Database configuration here\n                'ENGINE': 'django.db.backends.mysql',\n                'NAME': '{{ .Values.endpoints.oslo_db.path | base }}',\n                'USER': '{{ .Values.endpoints.oslo_db.auth.horizon.username }}',\n                'PASSWORD': '{{ .Values.endpoints.oslo_db.auth.horizon.password }}',\n                'HOST': '{{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}',\n                'default-character-set': 'utf8',\n                {{- if .Values.manifests.certificates }}\n                'OPTIONS':{\n                    'ssl': {\n                        'ca': '/etc/mysql/certs/ca.crt',\n                        'cert': '/etc/mysql/certs/tls.crt',\n                        'key': '/etc/mysql/certs/tls.key'\n                    }\n                },\n                {{- end }}\n                'PORT': '{{ tuple \"oslo_db\" \"internal\" \"mysql\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}'\n            }\n        }\n        SESSION_ENGINE = '{{ .Values.conf.horizon.local_settings.config.session_engine }}'\n\n        # Send email to the console by default\n        EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n        # Or send them to /dev/null\n        #EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'\n\n        # Configure these for your outgoing email host\n        #EMAIL_HOST = 'smtp.my-company.com'\n        #EMAIL_PORT = 25\n        #EMAIL_HOST_USER = 'djangomail'\n        #EMAIL_HOST_PASSWORD = 'top-secret!'\n\n        # For multiple regions uncomment this configuration, and add (endpoint, title).\n        #AVAILABLE_REGIONS = [\n        #    ('http://cluster1.example.com:5000/v2.0', 'cluster1'),\n        #    ('http://cluster2.example.com:5000/v2.0', 'cluster2'),\n        #]\n\n        OPENSTACK_KEYSTONE_URL = \"{{ tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\"\n        OPENSTACK_KEYSTONE_DEFAULT_ROLE = \"member\"\n\n        # This setting specifies the name of the header with remote IP address. If not present,\n        # then REMOTE_ADDR header is used. 
The common value for this setting is HTTP_X_REAL_IP\n        # or HTTP_X_FORWARDED_FOR\n        SECURE_PROXY_ADDR_HEADER = 'HTTP_X_FORWARDED_FOR'\n\n        {{- if .Values.conf.horizon.local_settings.config.auth.sso.enabled }}\n        # Enables keystone web single-sign-on if set to True.\n        WEBSSO_ENABLED = True\n\n        # Determines which authentication choice to show as default.\n        WEBSSO_INITIAL_CHOICE = \"{{ .Values.conf.horizon.local_settings.config.auth.sso.initial_choice }}\"\n\n        {{- if .Values.conf.horizon.local_settings.config.auth.sso.websso_keystone_url }}\n        # The full auth URL for the Keystone endpoint used for web single-sign-on authentication.\n        WEBSSO_KEYSTONE_URL = \"{{ .Values.conf.horizon.local_settings.config.auth.sso.websso_keystone_url }}\"\n        {{- end }}\n\n        # The list of authentication mechanisms\n        # which include keystone federation protocols.\n        # Current supported protocol IDs are 'saml2' and 'oidc'\n        # which represent SAML 2.0, OpenID Connect respectively.\n        # Do not remove the mandatory credentials mechanism.\n        WEBSSO_CHOICES = (\n            (\"credentials\", _(\"Keystone Credentials\")),\n          {{- range $i, $sso := .Values.conf.horizon.local_settings.config.auth.idp_mapping }}\n            ({{ $sso.name | quote }}, {{ $sso.label | quote }}),\n          {{- end }}\n        )\n\n        WEBSSO_IDP_MAPPING = {\n          {{- range $i, $sso := .Values.conf.horizon.local_settings.config.auth.idp_mapping }}\n            {{ $sso.name | quote}}: ({{ $sso.idp | quote }}, {{ $sso.protocol | quote }}),\n          {{- end }}\n        }\n\n        {{- end }}\n\n        # Disable SSL certificate checks (useful for self-signed certificates):\n        OPENSTACK_SSL_NO_VERIFY = {{ .Values.conf.horizon.local_settings.config.ssl_no_verify }}\n\n        {{- if .Values.manifests.certificates }}\n        # The CA certificate to use to verify SSL connections\n        
OPENSTACK_SSL_CACERT = '/etc/openstack-dashboard/certs/ca.crt'\n        {{- end }}\n\n        # The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the\n        # capabilities of the auth backend for Keystone.\n        # If Keystone has been configured to use LDAP as the auth backend then set\n        # can_edit_user to False and name to 'ldap'.\n        #\n        # TODO(tres): Remove these once Keystone has an API to identify auth backend.\n        OPENSTACK_KEYSTONE_BACKEND = {\n            'name': 'native',\n            'can_edit_user': True,\n            'can_edit_group': True,\n            'can_edit_project': True,\n            'can_edit_domain': True,\n            'can_edit_role': True,\n        }\n\n        # Setting this to True, will add a new \"Retrieve Password\" action on instance,\n        # allowing Admin session password retrieval/decryption.\n        OPENSTACK_ENABLE_PASSWORD_RETRIEVE = {{ .Values.conf.horizon.local_settings.config.openstack_enable_password_retrieve }}\n        # Controls whether the keystone openrc file is accessible from the user menu and the api access panel.\n        SHOW_OPENRC_FILE = {{ .Values.conf.horizon.local_settings.config.show_openrc_file }}\n        # The Launch Instance user experience has been significantly enhanced.\n        # You can choose whether to enable the new launch instance experience,\n        # the legacy experience, or both. The legacy experience will be removed\n        # in a future release, but is available as a temporary backup setting to ensure\n        # compatibility with existing deployments. Further development will not be\n        # done on the legacy experience. Please report any problems with the new\n        # experience via the StoryBoard tracking system.\n        #\n        # Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to\n        # determine the experience to enable.  
Set them both to true to enable\n        # both.\n        #LAUNCH_INSTANCE_LEGACY_ENABLED = True\n        #LAUNCH_INSTANCE_NG_ENABLED = False\n\n        # The Xen Hypervisor has the ability to set the mount point for volumes\n        # attached to instances (other Hypervisors currently do not). Setting\n        # can_set_mount_point to True will add the option to set the mount point\n        # from the UI.\n        OPENSTACK_HYPERVISOR_FEATURES = {\n            'can_set_mount_point': False,\n            'can_set_password': False,\n        }\n\n        # The OPENSTACK_CINDER_FEATURES settings can be used to enable optional\n        # services provided by cinder that is not exposed by its extension API.\n        OPENSTACK_CINDER_FEATURES = {\n            'enable_backup': {{ .Values.conf.horizon.local_settings.config.openstack_cinder_features.enable_backup }},\n        }\n\n        # The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional\n        # services provided by neutron. 
Options currently available are load\n        # balancer service, security groups, quotas, VPN service.\n        OPENSTACK_NEUTRON_NETWORK = {\n            'enable_router': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_router }},\n            'enable_quotas': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_quotas }},\n            'enable_ipv6': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_ipv6 }},\n            'enable_distributed_router': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_distributed_router }},\n            'enable_ha_router': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_ha_router }},\n            'enable_lb': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_lb }},\n            'enable_firewall': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_firewall }},\n            'enable_vpn': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_vpn }},\n            'enable_fip_topology_check': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_fip_topology_check }},\n\n            # The profile_support option is used to detect if an external router can be\n            # configured via the dashboard. When using specific plugins the\n            # profile_support can be turned on if needed.\n            'profile_support': None,\n            #'profile_support': 'cisco',\n\n            # Set which provider network types are supported. Only the network types\n            # in this list will be available to choose from when creating a network.\n            # Network types include local, flat, vlan, gre, and vxlan.\n            'supported_provider_types': ['*'],\n\n            # Set which VNIC types are supported for port binding. 
Only the VNIC\n            # types in this list will be available to choose from when creating a\n            # port.\n            # VNIC types include 'normal', 'macvtap' and 'direct'.\n            'supported_vnic_types': ['*']\n        }\n\n        # The OPENSTACK_IMAGE_BACKEND settings can be used to customize features\n        # in the OpenStack Dashboard related to the Image service, such as the list\n        # of supported image formats.\n        #OPENSTACK_IMAGE_BACKEND = {\n        #    'image_formats': [\n        #        ('', _('Select format')),\n        #        ('aki', _('AKI - Amazon Kernel Image')),\n        #        ('ami', _('AMI - Amazon Machine Image')),\n        #        ('ari', _('ARI - Amazon Ramdisk Image')),\n        #        ('docker', _('Docker')),\n        #        ('iso', _('ISO - Optical Disk Image')),\n        #        ('ova', _('OVA - Open Virtual Appliance')),\n        #        ('qcow2', _('QCOW2 - QEMU Emulator')),\n        #        ('raw', _('Raw')),\n        #        ('vdi', _('VDI - Virtual Disk Image')),\n        #        ('vhd', ('VHD - Virtual Hard Disk')),\n        #        ('vmdk', _('VMDK - Virtual Machine Disk')),\n        #    ]\n        #}\n\n        # The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for\n        # image custom property attributes that appear on image detail pages.\n        IMAGE_CUSTOM_PROPERTY_TITLES = {\n            \"architecture\": _(\"Architecture\"),\n            \"kernel_id\": _(\"Kernel ID\"),\n            \"ramdisk_id\": _(\"Ramdisk ID\"),\n            \"image_state\": _(\"Euca2ools state\"),\n            \"project_id\": _(\"Project ID\"),\n            \"image_type\": _(\"Image Type\"),\n        }\n\n        # The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image\n        # custom properties should not be displayed in the Image Custom Properties\n        # table.\n        IMAGE_RESERVED_CUSTOM_PROPERTIES = []\n\n        # Set to 'legacy' or 
'direct' to allow users to upload images to glance via\n        # Horizon server. When enabled, a file form field will appear on the create\n        # image form. If set to 'off', there will be no file form field on the create\n        # image form. See documentation for deployment considerations.\n        HORIZON_IMAGES_UPLOAD_MODE = '{{ .Values.conf.horizon.local_settings.config.horizon_images_upload_mode }}'\n\n        # OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints\n        # in the Keystone service catalog. Use this setting when Horizon is running\n        # external to the OpenStack environment. The default is 'publicURL'.\n        OPENSTACK_ENDPOINT_TYPE = \"{{ .Values.conf.horizon.local_settings.config.endpoint_type }}\"\n\n        # SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the\n        # case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints\n        # in the Keystone service catalog. Use this setting when Horizon is running\n        # external to the OpenStack environment. The default is None.  This\n        # value should differ from OPENSTACK_ENDPOINT_TYPE if used.\n        SECONDARY_ENDPOINT_TYPE = \"publicURL\"\n\n        # The number of objects (Swift containers/objects or images) to display\n        # on a single page before providing a paging element (a \"more\" link)\n        # to paginate results.\n        API_RESULT_LIMIT = 1000\n        API_RESULT_PAGE_SIZE = 20\n\n        # The size of chunk in bytes for downloading objects from Swift\n        SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024\n\n        # Specify a maximum number of items to display in a dropdown.\n        DROPDOWN_MAX_ITEMS = 30\n\n        # The timezone of the server. 
This should correspond with the timezone\n        # of your entire OpenStack installation, and hopefully be in UTC.\n        TIME_ZONE = \"UTC\"\n\n        # When launching an instance, the menu of available flavors is\n        # sorted by RAM usage, ascending. If you would like a different sort order,\n        # you can provide another flavor attribute as sorting key. Alternatively, you\n        # can provide a custom callback method to use for sorting. You can also provide\n        # a flag for reverse sort. For more info, see\n        # http://docs.python.org/2/library/functions.html#sorted\n        #CREATE_INSTANCE_FLAVOR_SORT = {\n        #    'key': 'name',\n        #     # or\n        #    'key': my_awesome_callback_method,\n        #    'reverse': False,\n        #}\n\n        # Set this to True to display an 'Admin Password' field on the Change Password\n        # form to verify that it is indeed the admin logged-in who wants to change\n        # the password.\n        ENFORCE_PASSWORD_CHECK = {{ .Values.conf.horizon.local_settings.config.enforce_password_check }}\n\n        # Modules that provide /auth routes that can be used to handle different types\n        # of user authentication. Add auth plugins that require extra route handling to\n        # this list.\n        #AUTHENTICATION_URLS = [\n        #    'openstack_auth.urls',\n        #]\n\n        # The Horizon Policy Enforcement engine uses these values to load per service\n        # policy rule files. 
The content of these files should match the files the\n        # OpenStack services are using to determine role based access control in the\n        # target installation.\n\n        # Path to directory containing policy.yaml files\n        POLICY_FILES_PATH = '/etc/openstack-dashboard'\n        # Map of local copy of service policy files\n        #POLICY_FILES = {\n        #    'identity': 'keystone_policy.yaml',\n        #    'compute': 'nova_policy.yaml',\n        #    'volume': 'cinder_policy.yaml',\n        #    'image': 'glance_policy.yaml',\n        #    'orchestration': 'heat_policy.yaml',\n        #    'network': 'neutron_policy.yaml',\n        #}\n\n        # Trove user and database extension support. By default support for\n        # creating users and databases on database instances is turned on.\n        # To disable these extensions set the permission here to something\n        # unusable such as [\"!\"].\n        # TROVE_ADD_USER_PERMS = []\n        # TROVE_ADD_DATABASE_PERMS = []\n\n        # Change this patch to the appropriate static directory containing\n        # two files: _variables.scss and _styles.scss\n        #CUSTOM_THEME_PATH = 'static/themes/default'\n\n        LOGGING = {\n            'version': 1,\n            # When set to True this will disable all logging except\n            # for loggers specified in this configuration dictionary. 
Note that\n            # if nothing is specified here and disable_existing_loggers is True,\n            # django.db.backends will still log unless it is disabled explicitly.\n            'disable_existing_loggers': False,\n            'handlers': {\n                'null': {\n                    'level': 'DEBUG',\n                    'class': 'logging.NullHandler',\n                },\n                'console': {\n                    # Set the level to \"DEBUG\" for verbose output logging.\n                    'level': 'INFO',\n                    'class': 'logging.StreamHandler',\n                },\n            },\n            'loggers': {\n                # Logging from django.db.backends is VERY verbose, send to null\n                # by default.\n                'django.db.backends': {\n                    'handlers': ['null'],\n                    'propagate': False,\n                },\n                'requests': {\n                    'handlers': ['null'],\n                    'propagate': False,\n                },\n                'horizon': {\n                    'handlers': ['console'],\n                    'level': '{{ .Values.conf.horizon.local_settings.config.log_level }}',\n                    'propagate': False,\n                },\n                'openstack_dashboard': {\n                    'handlers': ['console'],\n                    'level': '{{ .Values.conf.horizon.local_settings.config.log_level }}',\n                    'propagate': False,\n                },\n                'novaclient': {\n                    'handlers': ['console'],\n                    'level': '{{ .Values.conf.horizon.local_settings.config.log_level }}',\n                    'propagate': False,\n                },\n                'cinderclient': {\n                    'handlers': ['console'],\n                    'level': '{{ .Values.conf.horizon.local_settings.config.log_level }}',\n                    'propagate': False,\n                },\n                
'glanceclient': {\n                    'handlers': ['console'],\n                    'level': '{{ .Values.conf.horizon.local_settings.config.log_level }}',\n                    'propagate': False,\n                },\n                'neutronclient': {\n                    'handlers': ['console'],\n                    'level': '{{ .Values.conf.horizon.local_settings.config.log_level }}',\n                    'propagate': False,\n                },\n                'heatclient': {\n                    'handlers': ['console'],\n                    'level': '{{ .Values.conf.horizon.local_settings.config.log_level }}',\n                    'propagate': False,\n                },\n                'troveclient': {\n                    'handlers': ['console'],\n                    'level': '{{ .Values.conf.horizon.local_settings.config.log_level }}',\n                    'propagate': False,\n                },\n                'swiftclient': {\n                    'handlers': ['console'],\n                    'level': '{{ .Values.conf.horizon.local_settings.config.log_level }}',\n                    'propagate': False,\n                },\n                'openstack_auth': {\n                    'handlers': ['console'],\n                    'level': '{{ .Values.conf.horizon.local_settings.config.log_level }}',\n                    'propagate': False,\n                },\n                'nose.plugins.manager': {\n                    'handlers': ['console'],\n                    'level': '{{ .Values.conf.horizon.local_settings.config.log_level }}',\n                    'propagate': False,\n                },\n                'django': {\n                    'handlers': ['console'],\n                    'level': '{{ .Values.conf.horizon.local_settings.config.log_level }}',\n                    'propagate': False,\n                },\n                'iso8601': {\n                    'handlers': ['null'],\n                    'propagate': False,\n                },\n         
       'scss': {\n                    'handlers': ['null'],\n                    'propagate': False,\n                },\n            }\n        }\n\n        # 'direction' should not be specified for all_tcp/udp/icmp.\n        # It is specified in the form.\n        SECURITY_GROUP_RULES = {\n            'all_tcp': {\n                'name': _('All TCP'),\n                'ip_protocol': 'tcp',\n                'from_port': '1',\n                'to_port': '65535',\n            },\n            'all_udp': {\n                'name': _('All UDP'),\n                'ip_protocol': 'udp',\n                'from_port': '1',\n                'to_port': '65535',\n            },\n            'all_icmp': {\n                'name': _('All ICMP'),\n                'ip_protocol': 'icmp',\n                'from_port': '-1',\n                'to_port': '-1',\n            },\n            'ssh': {\n                'name': 'SSH',\n                'ip_protocol': 'tcp',\n                'from_port': '22',\n                'to_port': '22',\n            },\n            'smtp': {\n                'name': 'SMTP',\n                'ip_protocol': 'tcp',\n                'from_port': '25',\n                'to_port': '25',\n            },\n            'dns': {\n                'name': 'DNS',\n                'ip_protocol': 'tcp',\n                'from_port': '53',\n                'to_port': '53',\n            },\n            'http': {\n                'name': 'HTTP',\n                'ip_protocol': 'tcp',\n                'from_port': '80',\n                'to_port': '80',\n            },\n            'pop3': {\n                'name': 'POP3',\n                'ip_protocol': 'tcp',\n                'from_port': '110',\n                'to_port': '110',\n            },\n            'imap': {\n                'name': 'IMAP',\n                'ip_protocol': 'tcp',\n                'from_port': '143',\n                'to_port': '143',\n            },\n            'ldap': {\n                
'name': 'LDAP',\n                'ip_protocol': 'tcp',\n                'from_port': '389',\n                'to_port': '389',\n            },\n            'https': {\n                'name': 'HTTPS',\n                'ip_protocol': 'tcp',\n                'from_port': '443',\n                'to_port': '443',\n            },\n            'smtps': {\n                'name': 'SMTPS',\n                'ip_protocol': 'tcp',\n                'from_port': '465',\n                'to_port': '465',\n            },\n            'imaps': {\n                'name': 'IMAPS',\n                'ip_protocol': 'tcp',\n                'from_port': '993',\n                'to_port': '993',\n            },\n            'pop3s': {\n                'name': 'POP3S',\n                'ip_protocol': 'tcp',\n                'from_port': '995',\n                'to_port': '995',\n            },\n            'ms_sql': {\n                'name': 'MS SQL',\n                'ip_protocol': 'tcp',\n                'from_port': '1433',\n                'to_port': '1433',\n            },\n            'mysql': {\n                'name': 'MYSQL',\n                'ip_protocol': 'tcp',\n                'from_port': '3306',\n                'to_port': '3306',\n            },\n            'rdp': {\n                'name': 'RDP',\n                'ip_protocol': 'tcp',\n                'from_port': '3389',\n                'to_port': '3389',\n            },\n        }\n\n        # Deprecation Notice:\n        #\n        # The setting FLAVOR_EXTRA_KEYS has been deprecated.\n        # Please load extra spec metadata into the Glance Metadata Definition Catalog.\n        #\n        # The sample quota definitions can be found in:\n        # <glance_source>/etc/metadefs/compute-quota.json\n        #\n        # The metadata definition catalog supports CLI and API:\n        #  $glance --os-image-api-version 2 help md-namespace-import\n        #  $glance-manage db_load_metadefs <directory_with_definition_files>\n 
       #\n        # See Metadata Definitions on: https://docs.openstack.org/glance/latest/\n\n        # Indicate to the Sahara data processing service whether or not\n        # automatic floating IP allocation is in effect.  If it is not\n        # in effect, the user will be prompted to choose a floating IP\n        # pool for use in their cluster.  False by default.  You would want\n        # to set this to True if you were running Nova Networking with\n        # auto_assign_floating_ip = True.\n        #SAHARA_AUTO_IP_ALLOCATION_ENABLED = False\n\n        # The hash algorithm to use for authentication tokens. This must\n        # match the hash algorithm that the identity server and the\n        # auth_token middleware are using. Allowed values are the\n        # algorithms supported by Python's hashlib library.\n        #OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'\n\n        # AngularJS requires some settings to be made available to\n        # the client side. Some settings are required by in-tree / built-in horizon\n        # features. These settings must be added to REST_API_REQUIRED_SETTINGS in the\n        # form of ['SETTING_1','SETTING_2'], etc.\n        #\n        # You may remove settings from this list for security purposes, but do so at\n        # the risk of breaking a built-in horizon feature. These settings are required\n        # for horizon to function properly. Only remove them if you know what you\n        # are doing. 
These settings may in the future be moved to be defined within\n        # the enabled panel configuration.\n        # You should not add settings to this list for out of tree extensions.\n        # See: https://wiki.openstack.org/wiki/Horizon/RESTAPI\n        REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',\n                                      'LAUNCH_INSTANCE_DEFAULTS',\n                                      'OPENSTACK_IMAGE_FORMATS']\n\n        # Additional settings can be made available to the client side for\n        # extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS\n        # !! Please use extreme caution as the settings are transferred via HTTP/S\n        # and are not encrypted on the browser. This is an experimental API and\n        # may be deprecated in the future without notice.\n        #REST_API_ADDITIONAL_SETTINGS = []\n\n        # DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded\n        # within an iframe. Legacy browsers are still vulnerable to a Cross-Frame\n        # Scripting (XFS) vulnerability, so this option allows extra security hardening\n        # where iframes are not used in deployment. 
Default setting is True.\n        # For more information see:\n        # http://tinyurl.com/anticlickjack\n        DISALLOW_IFRAME_EMBED = {{ .Values.conf.horizon.local_settings.config.disallow_iframe_embed }}\n\n        STATIC_ROOT = '/var/www/html/horizon'\n\n        {{- range $option, $value := .Values.conf.horizon.local_settings.config.raw }}\n        {{ $option }} = {{ toJson $value }}\n        {{- end }}\n    policy:\n      ceilometer:\n        context_is_admin: 'role:admin'\n        context_is_owner: 'user_id:%(target.user_id)s'\n        context_is_project: 'project_id:%(target.project_id)s'\n        segregation: 'rule:context_is_admin'\n      heat:\n        'actions:action': 'rule:deny_stack_user'\n        'build_info:build_info': 'rule:deny_stack_user'\n        'cloudformation:CancelUpdateStack': 'rule:deny_stack_user'\n        'cloudformation:CreateStack': 'rule:deny_stack_user'\n        'cloudformation:DeleteStack': 'rule:deny_stack_user'\n        'cloudformation:DescribeStackEvents': 'rule:deny_stack_user'\n        'cloudformation:DescribeStackResource': ''\n        'cloudformation:DescribeStackResources': 'rule:deny_stack_user'\n        'cloudformation:DescribeStacks': 'rule:deny_stack_user'\n        'cloudformation:EstimateTemplateCost': 'rule:deny_stack_user'\n        'cloudformation:GetTemplate': 'rule:deny_stack_user'\n        'cloudformation:ListStackResources': 'rule:deny_stack_user'\n        'cloudformation:ListStacks': 'rule:deny_stack_user'\n        'cloudformation:UpdateStack': 'rule:deny_stack_user'\n        'cloudformation:ValidateTemplate': 'rule:deny_stack_user'\n        context_is_admin: 'role:admin'\n        deny_everybody: '!'\n        deny_stack_user: 'not role:heat_stack_user'\n        'events:index': 'rule:deny_stack_user'\n        'events:show': 'rule:deny_stack_user'\n        'resource:index': 'rule:deny_stack_user'\n        'resource:mark_unhealthy': 'rule:deny_stack_user'\n        'resource:metadata': ''\n        
'resource:show': 'rule:deny_stack_user'\n        'resource:signal': ''\n        'resource_types:OS::Cinder::EncryptedVolumeType': 'rule:context_is_admin'\n        'resource_types:OS::Cinder::VolumeType': 'rule:context_is_admin'\n        'resource_types:OS::Manila::ShareType': 'rule:context_is_admin'\n        'resource_types:OS::Neutron::QoSBandwidthLimitRule': 'rule:context_is_admin'\n        'resource_types:OS::Neutron::QoSPolicy': 'rule:context_is_admin'\n        'resource_types:OS::Nova::Flavor': 'rule:context_is_admin'\n        'resource_types:OS::Nova::HostAggregate': 'rule:context_is_admin'\n        'service:index': 'rule:context_is_admin'\n        'software_configs:create': 'rule:deny_stack_user'\n        'software_configs:delete': 'rule:deny_stack_user'\n        'software_configs:global_index': 'rule:deny_everybody'\n        'software_configs:index': 'rule:deny_stack_user'\n        'software_configs:show': 'rule:deny_stack_user'\n        'software_deployments:create': 'rule:deny_stack_user'\n        'software_deployments:delete': 'rule:deny_stack_user'\n        'software_deployments:index': 'rule:deny_stack_user'\n        'software_deployments:metadata': ''\n        'software_deployments:show': 'rule:deny_stack_user'\n        'software_deployments:update': 'rule:deny_stack_user'\n        'stacks:abandon': 'rule:deny_stack_user'\n        'stacks:create': 'rule:deny_stack_user'\n        'stacks:delete': 'rule:deny_stack_user'\n        'stacks:delete_snapshot': 'rule:deny_stack_user'\n        'stacks:detail': 'rule:deny_stack_user'\n        'stacks:environment': 'rule:deny_stack_user'\n        'stacks:export': 'rule:deny_stack_user'\n        'stacks:generate_template': 'rule:deny_stack_user'\n        'stacks:global_index': 'rule:deny_everybody'\n        'stacks:index': 'rule:deny_stack_user'\n        'stacks:list_outputs': 'rule:deny_stack_user'\n        'stacks:list_resource_types': 'rule:deny_stack_user'\n        'stacks:list_snapshots': 
'rule:deny_stack_user'\n        'stacks:list_template_functions': 'rule:deny_stack_user'\n        'stacks:list_template_versions': 'rule:deny_stack_user'\n        'stacks:lookup': ''\n        'stacks:preview': 'rule:deny_stack_user'\n        'stacks:preview_update': 'rule:deny_stack_user'\n        'stacks:preview_update_patch': 'rule:deny_stack_user'\n        'stacks:resource_schema': 'rule:deny_stack_user'\n        'stacks:restore_snapshot': 'rule:deny_stack_user'\n        'stacks:show': 'rule:deny_stack_user'\n        'stacks:show_output': 'rule:deny_stack_user'\n        'stacks:show_snapshot': 'rule:deny_stack_user'\n        'stacks:snapshot': 'rule:deny_stack_user'\n        'stacks:template': 'rule:deny_stack_user'\n        'stacks:update': 'rule:deny_stack_user'\n        'stacks:update_patch': 'rule:deny_stack_user'\n        'stacks:validate_template': 'rule:deny_stack_user'\n    # list of panels to enable for horizon\n    # this requires that the panels are already installed in the horizon image, if they are not\n    # nothing will be added\n    # the name of the panel should be the name of the dir where the panel is installed\n    # for example heat_dashboard, cloudkittydashboard or neutron_taas_dashboard\n    extra_panels:\n      - heat_dashboard\n      - neutron_taas_dashboard\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - horizon-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    dashboard:\n      jobs:\n        - horizon-db-sync\n      services:\n        - endpoint: internal\n          service: oslo_cache\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - 
horizon-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    tests:\n      services:\n        - endpoint: internal\n          service: dashboard\n\npod:\n  security_context:\n    horizon:\n      pod:\n        runAsUser: 42424\n      container:\n        horizon:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: false\n          runAsUser: 0\n    db_sync:\n      pod:\n        runAsUser: 42424\n      container:\n        horizon_db_sync:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: false\n          runAsUser: 0\n    test:\n      pod:\n        runAsUser: 42424\n      container:\n        horizon_test:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    horizon:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  mounts:\n    horizon_db_init:\n      init_container: null\n      horizon_db_init:\n        volumeMounts:\n        volumes:\n    horizon_db_sync:\n      init_container: null\n      horizon_db_sync:\n        volumeMounts:\n        volumes:\n    horizon:\n      init_container: null\n      horizon:\n        volumeMounts:\n        volumes:\n    horizon_tests:\n      init_container: null\n      horizon_tests:\n        volumeMounts:\n        volumes:\n  replicas:\n    server: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        
rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      horizon:\n        min_available: 0\n    termination_grace_period:\n      horizon:\n        timeout: 30\n  resources:\n    enabled: false\n    server:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: horizon-keystone-admin\n  oslo_db:\n    admin: horizon-db-admin\n    horizon: horizon-db-user\n  tls:\n    dashboard:\n      dashboard:\n        public: horizon-tls-public\n        internal: horizon-tls-web\n  oci_image_registry:\n    horizon: horizon-oci-image-registry\n\ntls:\n  identity: false\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: 
null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      horizon:\n        username: horizon\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  oslo_cache:\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  dashboard:\n    name: horizon\n    hosts:\n      default: horizon-int\n      public: horizon\n    host_fqdn_override:\n      default: null\n      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      web:\n        default: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      horizon:\n        username: horizon\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /horizon\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress\n  # 
They are used to enable the Egress K8s network policy.\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns:\n        default: 53\n        protocol: UDP\n  ingress:\n    namespace: null\n    name: ingress\n    hosts:\n      default: ingress\n    port:\n      ingress:\n        default: 80\n\nnetwork_policy:\n  horizon:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  configmap_logo: false\n  deployment: true\n  ingress_api: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_image_repo_sync: true\n  pdb: true\n  pod_helm_test: true\n  network_policy: false\n  secret_db: true\n  secret_ingress_tls: true\n  secret_keystone: true\n  secret_registry: true\n  service_ingress: true\n  service: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "ironic/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Ironic\nname: ironic\nversion: 2025.2.0\nhome: https://docs.openstack.org/ironic/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Ironic/OpenStack_Project_Ironic_vertical.png\nsources:\n  - https://opendev.org/openstack/ironic\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "ironic/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{ $source_base := .Values.bootstrap.image.source_base | default \"\" }}\n{{ range $name, $opts := .Values.bootstrap.image.structured }}\n{{ $source := empty $source_base | ternary $opts.source (printf \"%s/%s\" $source_base $opts.source) }}\nopenstack image show {{ $name | quote }} -fvalue -cid || (\n  IMAGE_LOC=$(mktemp)\n  curl --fail -sSL {{ $source }} -o ${IMAGE_LOC}\n  openstack image create {{ $name | quote }} \\\n  --disk-format {{ $opts.disk_format }} \\\n  --container-format {{ $opts.container_format }} \\\n  --file ${IMAGE_LOC} \\\n  {{ if $opts.properties -}} {{ range $k, $v := $opts.properties }}--property {{$k}}={{$v}} {{ end }}{{ end -}} \\\n  --{{ $opts.visibility | default \"public\" }}\n  rm -f ${IMAGE_LOC}\n)\n{{ else }}\n{{ .Values.bootstrap.image.script | default \"echo 'Not Enabled'\" }}\n{{ end }}\n"
  },
  {
    "path": "ironic/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n# https://docs.openstack.org/ironic/latest/admin/upgrade-guide.html\nironic-dbsync upgrade\n\nironic-dbsync online_data_migrations\n\necho 'Finished DB migrations'\n"
  },
  {
    "path": "ironic/templates/bin/_ironic-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec uwsgi --ini /etc/ironic/ironic-api-uwsgi.ini\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "ironic/templates/bin/_ironic-conductor-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nif [ \"x\" == \"x${PROVISIONER_INTERFACE}\" ]; then\n  echo \"Provisioner interface is not set\"\n  exit 1\nfi\n\nfunction net_pxe_addr {\n ip addr | awk \"/inet / && /${PROVISIONER_INTERFACE}/{print \\$2; exit }\"\n}\nfunction net_pxe_ip {\n echo $(net_pxe_addr) | awk -F '/' '{ print $1; exit }'\n}\nPXE_IP=$(net_pxe_ip)\n\nif [ \"x\" == \"x${PXE_IP}\" ]; then\n  echo \"Could not find IP for pxe to bind to\"\n  exit 1\nfi\n\n# ensure the tempdir exists, read it from the config\nironictmpdir=$(python -c 'from configparser import ConfigParser;cfg = ConfigParser();cfg.read(\"/etc/ironic/ironic.conf\");print(cfg.get(\"DEFAULT\", \"tempdir\", fallback=\"\"))')\nif [ -n \"${ironictmpdir}\" -a ! -d \"${ironictmpdir}\" ]; then\n  mkdir -p \"${ironictmpdir}\"\n  chmod 1777 \"${ironictmpdir}\"\nfi\n\ntee /tmp/pod-shared/conductor-local-ip.conf << EOF\n[DEFAULT]\n\n# IP address of this host. If unset, will determine the IP\n# programmatically. If unable to do so, will use \"127.0.0.1\".\n# (string value)\nmy_ip = ${PXE_IP}\n\n[pxe]\n# IP address of ironic-conductor node's TFTP server. (string\n# value)\ntftp_server = ${PXE_IP}\n\n[deploy]\n# ironic-conductor node's HTTP server URL. Example:\n# http://192.1.2.3:8080 (string value)\n# from .deploy.ironic.http_url\nhttp_url = http://${PXE_IP}:{{ tuple \"baremetal\" \"internal\" \"pxe_http\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\nEOF\n"
  },
  {
    "path": "ironic/templates/bin/_ironic-conductor.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nmkdir -p /var/lib/openstack-helm/ironic/images\nmkdir -p /var/lib/openstack-helm/ironic/master_images\n\n{{- if and (.Values.bootstrap.object_store.enabled) (.Values.bootstrap.object_store.openstack.enabled) }}\nOPTIONS=\" --config-file /tmp/pod-shared/swift.conf\"\n{{- end }}\n\nexec ironic-conductor \\\n      --config-file /etc/ironic/ironic.conf \\\n      --config-file /tmp/pod-shared/conductor-local-ip.conf \\\n      ${OPTIONS} \\\n      --config-dir /etc/ironic/ironic.conf.d\n"
  },
  {
    "path": "ironic/templates/bin/_manage-cleaning-network.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nif ! openstack network show ${neutron_network_name}; then\n  IRONIC_NEUTRON_CLEANING_NET_ID=$(openstack network create -f value -c id \\\n    --share \\\n    --provider-network-type flat \\\n    --provider-physical-network ${neutron_provider_network} \\\n    ${neutron_network_name})\nelse\n  IRONIC_NEUTRON_CLEANING_NET_ID=$(openstack network show ${neutron_network_name} -f value -c id)\nfi\n\nSUBNETS=$(openstack network show $IRONIC_NEUTRON_CLEANING_NET_ID -f value -c subnets)\nif [ \"x${SUBNETS}\" != \"x[]\" ]; then\n  for SUBNET in ${SUBNETS}; do\n    CURRENT_SUBNET=$(openstack subnet show $SUBNET -f value -c name)\n    if [ \"x${CURRENT_SUBNET}\" == \"x${neutron_subnet_name}\" ]; then\n      openstack subnet show ${neutron_subnet_name}\n      SUBNET_EXISTS=true\n    fi\n  done\nfi\n\nif [ \"x${SUBNET_EXISTS}\" != \"xtrue\" ]; then\n  openstack subnet create \\\n    --gateway ${neutron_subnet_gateway%/*} \\\n    --allocation-pool start=${neutron_subnet_alloc_start},end=${neutron_subnet_alloc_end} \\\n    --dns-nameserver ${neutron_subnet_dns_nameserver} \\\n    --subnet-range ${neutron_subnet_cidr} \\\n    --network ${neutron_network_name} \\\n    ${neutron_subnet_name}\nfi\n"
  },
  {
    "path": "ironic/templates/bin/_retreive-swift-config.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nOS_SWIFT_ENDPOINT=\"$(openstack endpoint list \\\n    --service swift \\\n    --interface public \\\n    -f value \\\n    -c URL | head -1 )\"\nOS_SWIFT_HOST_AND_PATH_PREFIX=\"$(echo \"${OS_SWIFT_ENDPOINT}\" | awk -F \"/${OS_SWIFT_API_VERSION}\" '{ print $1 }')\"\nOS_SWIFT_ACCOUNT_PREFIX=\"$(echo \"${OS_SWIFT_ENDPOINT}\" | awk -F \"/${OS_SWIFT_API_VERSION}/\" '{ print $NF }' | awk -F '$' '{ print $1 }')\"\nOS_PROJECT_ID=\"$(openstack project show ${OS_PROJECT_NAME} -f value -c id)\"\nOS_SWIFT_ACCOUNT=\"$(echo \"${OS_SWIFT_ACCOUNT_PREFIX}${OS_PROJECT_ID}\")\"\n\ntee /tmp/pod-shared/swift.conf <<EOF\n[glance]\nswift_endpoint_url: \"${OS_SWIFT_HOST_AND_PATH_PREFIX}\"\nswift_account: \"${OS_SWIFT_ACCOUNT}\"\nEOF\n"
  },
  {
    "path": "ironic/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: ironic-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.image.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  manage-cleaning-network.sh: |\n{{ tuple \"bin/_manage-cleaning-network.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  retreive-swift-config.sh: |\n{{ tuple \"bin/_retreive-swift-config.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  ironic-api.sh: |\n{{ tuple \"bin/_ironic-api.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  ironic-conductor.sh: |\n{{ tuple \"bin/_ironic-conductor.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ironic-conductor-init.sh: |\n{{ tuple \"bin/_ironic-conductor-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- if .Values.conductor.pxe.enabled }}\n{{ include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conductor.pxe.script \"key\" \"ironic-conductor-pxe.sh\") | indent 2 }}\n{{ include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conductor.pxe.init_script \"key\" \"ironic-conductor-pxe-init.sh\") | indent 2 }}\n{{ end }}\n{{- if .Values.conductor.http.enabled }}\n{{ include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conductor.http.script \"key\" \"ironic-conductor-http.sh\") | indent 2 }}\n{{ include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conductor.http.init_script \"key\" \"ironic-conductor-http-init.sh\") | indent 2 }}\n{{ end }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.ironic.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ironic.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ironic.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.ironic.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.ironic.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.ironic.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.ironic.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.ironic.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.ironic.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.ironic.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.ironic.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.ironic.keystone_authtoken \"username\" .Values.endpoints.identity.auth.ironic.username -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.ironic.keystone_authtoken \"password\" .Values.endpoints.identity.auth.ironic.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.ironic.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.ironic.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if .Values.conf.ironic.service_user.send_service_user_token -}}\n\n{{- if empty .Values.conf.ironic.service_user.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.ironic.service_user \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.service_user.region_name -}}\n{{- $_ := set .Values.conf.ironic.service_user \"region_name\" .Values.endpoints.identity.auth.ironic.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.service_user.project_name -}}\n{{- $_ := set .Values.conf.ironic.service_user \"project_name\" .Values.endpoints.identity.auth.ironic.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.service_user.project_domain_name -}}\n{{- $_ := set .Values.conf.ironic.service_user \"project_domain_name\" .Values.endpoints.identity.auth.ironic.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.service_user.user_domain_name -}}\n{{- $_ := set .Values.conf.ironic.service_user \"user_domain_name\" .Values.endpoints.identity.auth.ironic.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.service_user.username -}}\n{{- $_ := set .Values.conf.ironic.service_user \"username\" .Values.endpoints.identity.auth.ironic.username -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.service_user.password -}}\n{{- $_ := set .Values.conf.ironic.service_user \"password\" .Values.endpoints.identity.auth.ironic.password -}}\n{{- end -}}\n\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" 
.Values.conf.ironic.database.connection)) (empty .Values.conf.ironic.database.connection) -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"ironic\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\"| set .Values.conf.ironic.database \"connection\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"ironic\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.ironic.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.glance.glance_host -}}\n{{- $_ := tuple \"image\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\"| set .Values.conf.ironic.glance \"glance_host\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.glance.glance_port -}}\n{{- $_ := tuple \"image\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\"| set .Values.conf.ironic.glance \"glance_port\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.glance.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ironic.glance \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.glance.project_name -}}\n{{- $_ := set .Values.conf.ironic.glance \"project_name\" .Values.endpoints.identity.auth.glance.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.glance.project_domain_name -}}\n{{- $_ := set .Values.conf.ironic.glance \"project_domain_name\" .Values.endpoints.identity.auth.glance.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.glance.user_domain_name -}}\n{{- $_ := set .Values.conf.ironic.glance \"user_domain_name\" .Values.endpoints.identity.auth.glance.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.glance.username -}}\n{{- $_ := set .Values.conf.ironic.glance \"username\" .Values.endpoints.identity.auth.glance.username -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.glance.password -}}\n{{- $_ := set .Values.conf.ironic.glance \"password\" .Values.endpoints.identity.auth.glance.password -}}\n{{- end -}}\n\n\n{{- if empty .Values.conf.ironic.inspector.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ironic.inspector \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.inspector.project_name -}}\n{{- $_ := set .Values.conf.ironic.inspector \"project_name\" .Values.endpoints.identity.auth.ironic.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.inspector.project_domain_name -}}\n{{- $_ := set .Values.conf.ironic.inspector \"project_domain_name\" .Values.endpoints.identity.auth.ironic.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.inspector.user_domain_name -}}\n{{- $_ := set .Values.conf.ironic.inspector \"user_domain_name\" .Values.endpoints.identity.auth.ironic.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.inspector.username -}}\n{{- $_ := set .Values.conf.ironic.inspector \"username\" .Values.endpoints.identity.auth.ironic.username -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.inspector.password -}}\n{{- $_ := set .Values.conf.ironic.inspector \"password\" .Values.endpoints.identity.auth.ironic.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.nova.url -}}\n{{- $_ := tuple \"compute\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ironic.nova \"url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.nova.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ironic.nova \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.nova.project_name -}}\n{{- $_ := set .Values.conf.ironic.nova \"project_name\" .Values.endpoints.identity.auth.ironic.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.nova.project_domain_name -}}\n{{- $_ := set .Values.conf.ironic.nova \"project_domain_name\" .Values.endpoints.identity.auth.ironic.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.nova.user_domain_name -}}\n{{- $_ := set .Values.conf.ironic.nova \"user_domain_name\" .Values.endpoints.identity.auth.ironic.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.nova.username -}}\n{{- $_ := set .Values.conf.ironic.nova \"username\" .Values.endpoints.identity.auth.ironic.username -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.nova.password -}}\n{{- $_ := set .Values.conf.ironic.nova \"password\" .Values.endpoints.identity.auth.ironic.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.cinder.url -}}\n{{- $_ := tuple \"volumev3\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ironic.cinder \"url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.cinder.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ironic.cinder \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.cinder.project_name -}}\n{{- $_ := set .Values.conf.ironic.cinder \"project_name\" .Values.endpoints.identity.auth.ironic.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.cinder.project_domain_name -}}\n{{- $_ := set .Values.conf.ironic.cinder \"project_domain_name\" .Values.endpoints.identity.auth.ironic.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.cinder.user_domain_name -}}\n{{- $_ := set .Values.conf.ironic.cinder \"user_domain_name\" .Values.endpoints.identity.auth.ironic.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.cinder.username -}}\n{{- $_ := set .Values.conf.ironic.cinder \"username\" .Values.endpoints.identity.auth.ironic.username -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.cinder.password -}}\n{{- $_ := set .Values.conf.ironic.cinder \"password\" .Values.endpoints.identity.auth.ironic.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.neutron.url -}}\n{{- $_ := tuple \"network\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ironic.neutron \"url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.neutron.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ironic.neutron \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.neutron.project_name -}}\n{{- $_ := set .Values.conf.ironic.neutron \"project_name\" .Values.endpoints.identity.auth.ironic.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.neutron.project_domain_name -}}\n{{- $_ := set .Values.conf.ironic.neutron \"project_domain_name\" .Values.endpoints.identity.auth.ironic.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.neutron.user_domain_name -}}\n{{- $_ := set .Values.conf.ironic.neutron \"user_domain_name\" .Values.endpoints.identity.auth.ironic.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.neutron.username -}}\n{{- $_ := set .Values.conf.ironic.neutron \"username\" .Values.endpoints.identity.auth.ironic.username -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.neutron.password -}}\n{{- $_ := set .Values.conf.ironic.neutron \"password\" .Values.endpoints.identity.auth.ironic.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.service_catalog.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ironic.service_catalog \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.service_catalog.project_name -}}\n{{- $_ := set .Values.conf.ironic.service_catalog \"project_name\" .Values.endpoints.identity.auth.ironic.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.service_catalog.project_domain_name -}}\n{{- $_ := set .Values.conf.ironic.service_catalog \"project_domain_name\" .Values.endpoints.identity.auth.ironic.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.service_catalog.user_domain_name -}}\n{{- $_ := set .Values.conf.ironic.service_catalog \"user_domain_name\" .Values.endpoints.identity.auth.ironic.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.service_catalog.username -}}\n{{- $_ := set .Values.conf.ironic.service_catalog \"username\" .Values.endpoints.identity.auth.ironic.username -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.service_catalog.password -}}\n{{- $_ := set .Values.conf.ironic.service_catalog \"password\" .Values.endpoints.identity.auth.ironic.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.swift.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ironic.swift \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.swift.project_name -}}\n{{- $_ := set .Values.conf.ironic.swift \"project_name\" .Values.endpoints.identity.auth.ironic.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.swift.project_domain_name -}}\n{{- $_ := set .Values.conf.ironic.swift \"project_domain_name\" .Values.endpoints.identity.auth.ironic.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.swift.user_domain_name -}}\n{{- $_ := set .Values.conf.ironic.swift \"user_domain_name\" .Values.endpoints.identity.auth.ironic.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.swift.username -}}\n{{- $_ := set .Values.conf.ironic.swift \"username\" .Values.endpoints.identity.auth.ironic.username -}}\n{{- end -}}\n{{- if empty .Values.conf.ironic.swift.password -}}\n{{- $_ := set .Values.conf.ironic.swift \"password\" .Values.endpoints.identity.auth.ironic.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.neutron.cleaning_network -}}\n{{- $_ := set .Values.conf.ironic.neutron \"cleaning_network\" (default \"\" .Values.network.pxe.neutron_network_name) -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.api.public_endpoint -}}\n{{- $_ := tuple \"baremetal\" \"public\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ironic.api \"public_endpoint\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.glance.swift_endpoint_url -}}\n{{- $_ := tuple \"object_store\" \"public\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| trimSuffix .Values.endpoints.object_store.path.default | set .Values.conf.ironic.glance \"swift_endpoint_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.glance.swift_temp_url_key -}}\n{{- $_ := set .Values.conf.ironic.glance \"swift_temp_url_key\" .Values.endpoints.object_store.auth.glance.tmpurlkey -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.api.port -}}\n{{- $_ := set .Values.conf.ironic.api \"port\" (tuple \"baremetal\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\") -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic_api_uwsgi.uwsgi.processes -}}\n{{- $_ := set .Values.conf.ironic_api_uwsgi.uwsgi \"processes\" .Values.conf.ironic.api.workers -}}\n{{- end -}}\n{{- if empty (index .Values.conf.ironic_api_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"baremetal\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.ironic_api_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .Release.Name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) 
-}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n\n{{- if empty .Values.conf.ironic.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.ironic.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: ironic-etc\ntype: Opaque\ndata:\n  ironic.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.ironic | b64enc }}\n  ironic-api-uwsgi.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.ironic_api_uwsgi | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.tftp_map_file \"key\" \"tftp-map-file\" \"format\" \"Secret\" ) | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.nginx \"key\" \"nginx.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . }}\n\n{{- $mounts_ironic_api := .Values.pod.mounts.ironic_api.ironic_api }}\n{{- $mounts_ironic_api_init := .Values.pod.mounts.ironic_api.init_container }}\n{{- $etcSources := .Values.pod.etcSources.ironic_api }}\n\n{{- $serviceAccountName := \"ironic-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: ironic-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ironic\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ironic\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ironic\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"ironic_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ tuple \"ironic_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"ironic_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"ironic\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.ironic.enabled }}\n{{ tuple $envAll \"ironic\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_ironic_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n{{- if and (.Values.bootstrap.object_store.enabled) (.Values.bootstrap.object_store.openstack.enabled) }}\n        - name: ironic-retrive-swift-config\n{{ tuple $envAll \"ironic_retrive_swift_config\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.conductor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: OS_SWIFT_API_VERSION\n              value: {{ .Values.conf.ironic.glance.swift_api_version | quote }}\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.glance }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          command:\n            - /tmp/retreive-swift-config.sh\n          
volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ironic-bin\n              mountPath: /tmp/retreive-swift-config.sh\n              subPath: retreive-swift-config.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n{{- end }}\n      containers:\n        - name: ironic-api\n{{ tuple $envAll \"ironic_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/ironic-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/ironic-api.sh\n                  - stop\n          ports:\n            - containerPort: {{ tuple \"baremetal\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            httpGet:\n              scheme: {{ tuple \"baremetal\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n              path: /\n              port: {{ tuple \"baremetal\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.ironic.oslo_concurrency.lock_path }}\n            - name: ironic-bin\n              mountPath: /tmp/ironic-api.sh\n              subPath: ironic-api.sh\n              readOnly: true\n            - name: ironic-etc\n              mountPath: /etc/ironic/ironic.conf\n              subPath: ironic.conf\n              readOnly: true\n            - name: ironic-etc\n              mountPath: /etc/ironic/ironic-api-uwsgi.ini\n              subPath: ironic-api-uwsgi.ini\n              readOnly: true\n            - name: ironic-etc-snippets\n              mountPath: /etc/ironic/ironic.conf.d/\n              readOnly: true\n            {{- if .Values.conf.ironic.DEFAULT.log_config_append }}\n            - name: ironic-etc\n              mountPath: {{ .Values.conf.ironic.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.ironic.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: ironic-etc\n              mountPath: /etc/ironic/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n{{ if $mounts_ironic_api.volumeMounts }}{{ toYaml $mounts_ironic_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: ironic-bin\n          configMap:\n            name: ironic-bin\n            defaultMode: 0555\n        - name: ironic-etc\n          secret:\n            secretName: ironic-etc\n            defaultMode: 0444\n        - name: ironic-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          
emptyDir: {}\n{{ end }}\n        - name: pod-shared\n          emptyDir: {}\n{{ if $mounts_ironic_api.volumes }}{{ toYaml $mounts_ironic_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "ironic/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendServiceType\" \"baremetal\" \"backendPort\" \"m-api\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.bootstrap\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"5\"\n{{- end }}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.image.enabled }}\n{{- if .Values.bootstrap.image.openstack.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"ironic\" \"keystoneUser\" .Values.bootstrap.image.openstack.ks_user \"logConfigFile\" .Values.conf.ironic.DEFAULT.log_config_append -}}\n{{- if .Values.pod.tolerations.ironic.enabled -}}\n{{- $_ := set $bootstrapJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{- $_ := set $bootstrapJob \"jobAnnotations\" (include \"metadata.annotations.job.bootstrap\" . | fromYaml) }}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- else }}\n{{ include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"ironic\" -}}\n{{- if .Values.pod.tolerations.ironic.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"ironic\" \"jobAnnotations\" -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.ironic.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"ironic\" \"podVolMounts\" .Values.pod.mounts.ironic_db_sync.ironic_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.ironic_db_sync.ironic_db_sync.volumes -}}\n{{- $_ := set $dbSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.ironic.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"ironic\" -}}\n{{- $_ := $imageRepoSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.ironic.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksEndpointsJob := dict \"envAll\" . \"serviceName\" \"ironic\" \"serviceTypes\" ( tuple \"baremetal\" ) -}}\n{{- $_ := set $ksEndpointsJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.ironic.enabled -}}\n{{- $_ := set $ksEndpointsJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksEndpointsJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"ironic\" \"serviceTypes\" ( tuple \"baremetal\" ) -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.ironic.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"ironic\" \"jobAnnotations\" -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.ironic.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/job-manage-cleaning-network.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_manage_cleaning_network }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ironic-manage-cleaning-network\" }}\n{{ tuple $envAll \"manage_cleaning_network\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: ironic-manage-cleaning-network\n  annotations:\n    \"helm.sh/hook\": post-install,post-upgrade\n    \"helm.sh/hook-delete-policy\": before-hook-creation\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ tuple \"ironic_manage_cleaning_network\" $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ironic\" \"manage-cleaning-network\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.ironic.enabled }}\n{{ tuple $envAll \"ironic\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"manage_cleaning_network\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n   
   containers:\n        - name: ironic-manage-cleaning-network\n{{ tuple $envAll \"ironic_manage_cleaning_network\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.manage_cleaning_network | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.ironic }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n{{ include \"helm-toolkit.utils.to_k8s_env_vars\" $envAll.Values.network.pxe | indent 12 }}\n          command:\n            - /tmp/manage-cleaning-network.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ironic-bin\n              mountPath: /tmp/manage-cleaning-network.sh\n              subPath: manage-cleaning-network.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: ironic-bin\n          configMap:\n            name: ironic-bin\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"ironic\" -}}\n{{- $_ := set $rmqUserJob \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.ironic.enabled -}}\n{{- $_ := set $rmqUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/network_policy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"ironic\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "ironic/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: ironic-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ironic\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"ironic\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  DB_CONNECTION: {{ tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"ironic\" \"glance\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"ironic\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass \"http\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"baremetal\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: m-api\n      port: {{ tuple \"baremetal\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"ironic\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"baremetal\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "ironic/templates/statefulset-conductor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.statefulset_conductor }}\n{{- $envAll := . }}\n\n{{- $mounts_ironic_conductor := .Values.pod.mounts.ironic_conductor.ironic_conductor }}\n{{- $mounts_ironic_conductor_init := .Values.pod.mounts.ironic_conductor.init_container }}\n{{- $etcSources := .Values.pod.etcSources.ironic_conductor }}\n\n{{- $serviceAccountName := \"ironic-conductor\" }}\n{{ tuple $envAll \"conductor\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: ironic-conductor\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ironic\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: ironic-conductor\n  replicas: {{ .Values.pod.replicas.conductor }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ironic\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ironic\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n{{ tuple \"ironic_conductor\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"ironic_conductor\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"ironic\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.conductor.node_selector_key }}: {{ .Values.labels.conductor.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.ironic.enabled }}\n{{ tuple $envAll \"ironic\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n      securityContext:\n        runAsUser: 0\n{{ if .Values.pod.useHostNetwork.conductor }}\n      hostNetwork: True\n      dnsPolicy: ClusterFirstWithHostNet\n{{ end }}\n{{ if .Values.pod.useHostIPC.conductor }}\n      hostIPC: True\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"conductor\" $mounts_ironic_conductor_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n{{- if $envAll.Values.conductor.pxe.enabled }}\n        - name: ironic-conductor-pxe-init\n{{ tuple $envAll \"ironic_pxe_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.conductor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/ironic-conductor-pxe-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ironic-bin\n              mountPath: /tmp/ironic-conductor-pxe-init.sh\n              subPath: ironic-conductor-pxe-init.sh\n              readOnly: true\n            - name: pod-data\n              mountPath: 
/var/lib/openstack-helm\n{{- end }}\n        - name: ironic-conductor-init\n{{ tuple $envAll \"ironic_conductor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.conductor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: PROVISIONER_INTERFACE\n              value: {{ .Values.network.pxe.device }}\n          command:\n            - /tmp/ironic-conductor-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ironic-bin\n              mountPath: /tmp/ironic-conductor-init.sh\n              subPath: ironic-conductor-init.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n{{- if $envAll.Values.conductor.http.enabled }}\n        - name: ironic-conductor-http-init\n{{ tuple $envAll \"ironic_conductor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.conductor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: PROVISIONER_INTERFACE\n              value: {{ .Values.network.pxe.device }}\n          command:\n            - /tmp/ironic-conductor-http-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ironic-bin\n              mountPath: /tmp/ironic-conductor-http-init.sh\n              subPath: ironic-conductor-http-init.sh\n              readOnly: true\n            - name: ironic-etc\n              mountPath: /etc/nginx/nginx.conf\n              subPath: nginx.conf\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n{{- end }}\n{{- if and (.Values.bootstrap.object_store.enabled) (.Values.bootstrap.object_store.openstack.enabled) }}\n        - name: ironic-retrive-swift-config\n{{ tuple $envAll 
\"ironic_retrive_swift_config\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.conductor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: OS_SWIFT_API_VERSION\n              value: {{ .Values.conf.ironic.glance.swift_api_version | quote }}\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.glance }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          command:\n            - /tmp/retreive-swift-config.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ironic-bin\n              mountPath: /tmp/retreive-swift-config.sh\n              subPath: retreive-swift-config.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n{{- end }}\n        {{- with .Values.conductor.initContainers }}\n        {{- tpl (toYaml .) 
$ | nindent 8 }}\n        {{- end }}\n      containers:\n        - name: ironic-conductor\n{{ tuple $envAll \"ironic_conductor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.conductor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            privileged: true\n          command:\n            - /tmp/ironic-conductor.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.ironic.oslo_concurrency.lock_path }}\n            - name: ironic-bin\n              mountPath: /tmp/ironic-conductor.sh\n              subPath: ironic-conductor.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: pod-var-cache-ironic\n              mountPath: /var/cache/ironic\n            - name: ironic-etc\n              mountPath: /etc/ironic/ironic.conf\n              subPath: ironic.conf\n              readOnly: true\n            - name: ironic-etc-snippets\n              mountPath: /etc/ironic/ironic.conf.d/\n              readOnly: true\n            {{- if .Values.conf.ironic.DEFAULT.log_config_append }}\n            - name: ironic-etc\n              mountPath: {{ .Values.conf.ironic.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.ironic.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: ironic-etc\n              mountPath: /etc/ironic/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: host-var-lib-ironic\n              mountPath: /var/lib/ironic\n            - name: pod-data\n              mountPath: /var/lib/openstack-helm\n{{ if $mounts_ironic_conductor.volumeMounts }}{{ toYaml $mounts_ironic_conductor.volumeMounts | indent 12 }}{{ end }}\n{{- if 
$envAll.Values.conductor.pxe.enabled }}\n        - name: ironic-conductor-pxe\n{{ tuple $envAll \"ironic_pxe\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.conductor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            privileged: true\n          env:\n            - name: PROVISIONER_INTERFACE\n              value: {{ .Values.network.pxe.device }}\n          command:\n            - /tmp/ironic-conductor-pxe.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ironic-bin\n              mountPath: /tmp/ironic-conductor-pxe.sh\n              subPath: ironic-conductor-pxe.sh\n              readOnly: true\n            - name: ironic-etc\n              mountPath: /tftp-map-file\n              subPath: tftp-map-file\n              readOnly: true\n            - name: pod-data\n              mountPath: /var/lib/openstack-helm\n{{- end }}\n{{- if $envAll.Values.conductor.http.enabled }}\n        - name: ironic-conductor-http\n{{ tuple $envAll \"ironic_pxe_http\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.conductor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/ironic-conductor-http.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ironic-bin\n              mountPath: /tmp/ironic-conductor-http.sh\n              subPath: ironic-conductor-http.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n              readOnly: true\n            - name: pod-data\n              mountPath: /var/lib/openstack-helm\n{{ if $mounts_ironic_conductor.volumeMounts }}{{ toYaml $mounts_ironic_conductor.volumeMounts | indent 12 }}{{ end }}\n{{- end }}\n        {{- with 
.Values.conductor.extraContainers }}\n        {{- tpl (toYaml .) $ | nindent 8 }}\n        {{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n        - name: pod-var-cache-ironic\n          emptyDir: {}\n        - name: ironic-bin\n          configMap:\n            name: ironic-bin\n            defaultMode: 0555\n        - name: ironic-etc\n          secret:\n            secretName: ironic-etc\n            defaultMode: 0444\n        - name: ironic-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: host-var-lib-ironic\n          hostPath:\n            path: /var/lib/ironic\n        - name: pod-data\n          emptyDir: {}\n{{ if $mounts_ironic_conductor.volumes }}{{ toYaml $mounts_ironic_conductor.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "ironic/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for ironic.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  conductor:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    ironic_manage_cleaning_network: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ironic_retrive_swift_config: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    # Bootstrap image requires curl\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ironic_db_sync: quay.io/airshipit/ironic:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ironic_api: quay.io/airshipit/ironic:2025.1-ubuntu_noble\n    ironic_conductor: quay.io/airshipit/ironic:2025.1-ubuntu_noble\n    ironic_pxe: 
quay.io/airshipit/ironic:2025.1-ubuntu_noble\n    ironic_pxe_init: quay.io/airshipit/ironic:2025.1-ubuntu_noble\n    ironic_pxe_http: docker.io/nginx:1.29.8\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nconf:\n  paste:\n    override:\n    append:\n  policy: {}\n  tftp_map_file: |\n    re ^(/tftpboot/) /tftpboot/\\2\n    re ^/tftpboot/ /tftpboot/\n    re ^(^/) /tftpboot/\\1\n    re ^([^/]) /tftpboot/\\1\n  nginx: |\n    user  nginx;\n    worker_processes  1;\n    error_log  /var/log/nginx/error.log warn;\n    pid        /var/run/nginx.pid;\n    events {\n        worker_connections  1024;\n    }\n    http {\n        include       /etc/nginx/mime.types;\n        default_type  application/octet-stream;\n        log_format  main  '$remote_addr - $remote_user [$time_local] \"$request\" '\n                          '$status $body_bytes_sent \"$http_referer\" '\n                          '\"$http_user_agent\" \"$http_x_forwarded_for\"';\n        access_log  /var/log/nginx/access.log  main;\n        sendfile        on;\n        #tcp_nopush     on;\n        keepalive_timeout  65;\n        #gzip  on;\n        server {\n          listen       OSH_PXE_IP:{{ tuple \"baremetal\" \"internal\" \"pxe_http\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }};\n          server_name  localhost;\n          #charset koi8-r;\n          #access_log  /var/log/nginx/host.access.log  main;\n          location / {\n              root   /var/lib/openstack-helm/httpboot;\n          }\n        }\n    }\n  ironic:\n    DEFAULT:\n      log_config_append: /etc/ironic/logging.conf\n      # conductor may use hardlinks to images for certain boot modes so the default path needs\n      # to be on the same filesystem\n      tempdir: /var/lib/openstack-helm/tmp\n    api:\n      port: null\n    conductor:\n      api_url: null\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    deploy:\n      http_root: /var/lib/openstack-helm/httpboot\n    nova:\n      auth_type: password\n    cinder:\n      auth_type: password\n    glance:\n      auth_type: password\n      swift_temp_url_duration: 86400\n      temp_url_endpoint_type: swift\n      swift_container: glance\n      swift_api_version: v1\n      auth_section: glance\n    inspector:\n      auth_type: password\n    keystone_authtoken:\n      service_token_roles: service\n      service_token_roles_required: true\n      auth_type: password\n      auth_version: v3\n    neutron:\n      auth_type: password\n    pxe:\n      pxe_append_params: \"nofb nomodeset vga=normal ipa-debug=1\"\n      images_path: /var/lib/openstack-helm/ironic/images\n      instance_master_path: /var/lib/openstack-helm/ironic/master_images\n      pxe_config_template: $pybasedir/drivers/modules/ipxe_config.template\n      uefi_pxe_config_template: $pybasedir/drivers/modules/ipxe_config.template\n      tftp_root: /var/lib/openstack-helm/tftpboot\n      tftp_master_path: 
/var/lib/openstack-helm/tftpboot/master_images\n      pxe_bootfile_name: undionly.kpxe\n      uefi_pxe_bootfile_name: ipxe.efi\n      ipxe_enabled: true\n    service_catalog:\n      auth_type: password\n    service_user:\n      auth_type: password\n      send_service_user_token: true\n    swift:\n      auth_url: null\n    oslo_policy:\n      policy_file: /etc/ironic/policy.yaml\n    oslo_concurrency:\n      lock_path: /var/lock\n  logging:\n    loggers:\n      keys:\n        - root\n        - ironic\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_ironic:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: ironic\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n  ironic_api_uwsgi:\n    uwsgi:\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      
hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      procname-prefix-spaced: \"ironic-api:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      module: ironic.wsgi:application\n\nconductor:\n  # -- Additional containers to add to the conductor pods\n  ## Note: Supports use of custom Helm templates\n  extraContainers: []\n  # -- Additional init containers to add to the conductor pods\n  ## Note: Supports use of custom Helm templates\n  initContainers: []\n  http:\n    enabled: true\n    init_script: |\n      #!/bin/bash\n      set -ex\n      if [ \"x\" == \"x${PROVISIONER_INTERFACE}\" ]; then\n        echo \"Provisioner interface is not set\"\n        exit 1\n      fi\n\n      function net_pxe_addr {\n       ip addr | awk \"/inet / && /${PROVISIONER_INTERFACE}/{print \\$2; exit }\"\n      }\n      function net_pxe_ip {\n       echo $(net_pxe_addr) | awk -F '/' '{ print $1; exit }'\n      }\n      PXE_IP=$(net_pxe_ip)\n\n      if [ \"x\" == \"x${PXE_IP}\" ]; then\n        echo \"Could not find IP for pxe to bind to\"\n        exit 1\n      fi\n\n      sed \"s|OSH_PXE_IP|${PXE_IP}|g\" /etc/nginx/nginx.conf > /tmp/pod-shared/nginx.conf\n    script: |\n      #!/bin/bash\n      set -ex\n      mkdir -p /var/lib/openstack-helm/httpboot\n      cp -v /tmp/pod-shared/nginx.conf /etc/nginx/nginx.conf\n      exec nginx -g 'daemon off;'\n  pxe:\n    enabled: true\n    init_script: |\n      #!/bin/bash\n      set -ex\n      # default to Ubuntu path\n      FILEPATH=${FILEPATH:-/usr/lib/ipxe}\n\n      mkdir -p /var/lib/openstack-helm/tftpboot\n      mkdir -p /var/lib/openstack-helm/tftpboot/master_images\n\n      for FILE in undionly.kpxe ipxe.efi pxelinux.0 snponly.efi; do\n        # copy in default file\n        if [ -f $FILEPATH/$FILE ]; then\n          cp -v $FILEPATH/$FILE /var/lib/openstack-helm/tftpboot\n        fi\n\n      
done\n    script: |\n      #!/bin/bash\n      set -ex\n      function net_pxe_addr {\n       ip addr | awk \"/inet / && /${PROVISIONER_INTERFACE}/{print \\$2; exit }\"\n      }\n      function net_pxe_ip {\n       echo $(net_pxe_addr) | awk -F '/' '{ print $1; exit }'\n      }\n      PXE_IP=$(net_pxe_ip)\n\n      if [ \"x\" == \"x${PXE_IP}\" ]; then\n        echo \"Could not find IP for pxe to bind to\"\n        exit 1\n      fi\n\n      ln -s /var/lib/openstack-helm/tftpboot /tftpboot\n      exec /usr/sbin/in.tftpd \\\n        --verbose \\\n        --foreground \\\n        --user root \\\n        --address ${PXE_IP}:69 \\\n        --map-file /tftp-map-file /tftpboot\n\nnetwork:\n  pxe:\n    device: ironic-pxe\n    neutron_network_name: baremetal\n    neutron_subnet_name: baremetal\n    neutron_provider_network: ironic\n    neutron_subnet_gateway: 172.24.6.1/24\n    neutron_subnet_cidr: 172.24.6.0/24\n    neutron_subnet_alloc_start: 172.24.6.100\n    neutron_subnet_alloc_end: 172.24.6.200\n    neutron_subnet_dns_nameserver: 10.96.0.10\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    node_port:\n      enabled: false\n      port: 30511\n\nbootstrap:\n  image:\n    enabled: true\n    openstack:\n      enabled: true\n      ks_user: ironic\n    # NOTE: if source_base is null the source will be used as is\n    source_base: http://tarballs.openstack.org/ironic-python-agent/tinyipa/files\n    structured:\n      ironic-agent.initramfs:\n        source: tinyipa-stable-2025.1.gz\n        disk_format: ari\n        container_format: ari\n      ironic-agent.kernel:\n        source: tinyipa-stable-2025.1.vmlinuz\n        disk_format: aki\n        container_format: aki\n  object_store:\n    enabled: true\n    openstack:\n      enabled: true\n\ndependencies:\n  dynamic:\n    
common:\n      local_image_registry:\n        jobs:\n          - ironic-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - ironic-db-sync\n        - ironic-ks-user\n        - ironic-ks-endpoints\n        - ironic-manage-cleaning-network\n        - ironic-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n    bootstrap:\n      jobs: null\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: image\n        - endpoint: internal\n          service: baremetal\n    conductor:\n      jobs:\n        - ironic-db-sync\n        - ironic-ks-user\n        - ironic-ks-endpoints\n        - ironic-manage-cleaning-network\n        - ironic-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: baremetal\n        - endpoint: internal\n          service: oslo_messaging\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - ironic-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    ks_endpoints:\n      jobs:\n        - ironic-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n      - endpoint: internal\n        service: oslo_messaging\n    manage_cleaning_network:\n      
services:\n        - endpoint: internal\n          service: network\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: ironic-keystone-admin\n    ironic: ironic-keystone-user\n    glance: ironic-glance-keystone-user\n  oslo_db:\n    admin: ironic-db-admin\n    ironic: ironic-db-user\n  oslo_messaging:\n    admin: ironic-rabbitmq-admin\n    ironic: ironic-rabbitmq-user\n  oci_image_registry:\n    ironic: ironic-oci-image-registry\n  tls:\n    baremetal:\n      api:\n        public: ironic-tls-public\n        internal: ironic-tls-api\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      ironic:\n        username: ironic\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      glance:\n        role: admin,service\n        region_name: RegionOne\n        username: glance\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      ironic:\n        role: admin,service\n        
region_name: RegionOne\n        username: ironic\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  baremetal:\n    name: ironic\n    hosts:\n      default: ironic-api\n      public: ironic\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      api:\n        default: 6385\n        public: 80\n      pxe_http:\n        default: 8080\n  volumev3:\n    name: cinderv3\n    hosts:\n      default: cinder-api\n      public: cinder\n    host_fqdn_override:\n      default: null\n    path:\n      default: '/v3/%(tenant_id)s'\n      healthcheck: /healthcheck\n    scheme:\n      default: http\n    port:\n      api:\n        default: 8776\n        public: 80\n  compute:\n    name: nova\n    hosts:\n      default: nova-api\n      public: nova\n    host_fqdn_override:\n      default: null\n    path:\n      default: \"/v2.1/%(tenant_id)s\"\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 8774\n        public: 80\n      novncproxy:\n        default: 6080\n  image:\n    name: glance\n    hosts:\n      default: glance-api\n      public: glance\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      api:\n        default: 9292\n        public: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n      ironic:\n        username: ironic\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /ironic\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  
oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n      ironic:\n        username: ironic\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /ironic\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  network:\n    name: neutron\n    hosts:\n      default: neutron-server\n      public: neutron\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 9696\n        public: 80\n  object_store:\n    name: swift\n    namespace: ceph\n    auth:\n      glance:\n        tmpurlkey: supersecret\n    hosts:\n      default: ceph-rgw\n    host_fqdn_override:\n      default: null\n    path:\n      default: /swift/v1/KEY_$(tenant_id)s\n    scheme:\n      default: http\n    port:\n      api:\n        default: 8088\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n\npod:\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: 
kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    ironic:\n      enabled: false\n      tolerations:\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule\n  mounts:\n    ironic_api:\n      init_container: null\n      ironic_api:\n        volumeMounts:\n        volumes:\n    ironic_conductor:\n      init_container: null\n      ironic_conductor:\n        volumeMounts:\n        volumes:\n    ironic_bootstrap:\n      init_container: null\n      ironic_bootstrap:\n        volumeMounts:\n        volumes:\n    ironic_db_sync:\n      ironic_db_sync:\n        volumeMounts:\n        volumes:\n  # -- This allows users to add Kubernetes Projected Volumes to be mounted at /etc/ironic/ironic.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/daemonset/cronjob\n  ## https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    ironic_api: []\n    ironic_conductor: []\n    ironic_db_sync: []\n  replicas:\n    api: 1\n    conductor: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    conductor:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: 
\"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  useHostNetwork:\n    conductor: true\n  useHostIPC:\n    conductor: false\n\nnetwork_policy:\n  ironic:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  deployment_api: true\n  ingress_api: true\n  job_bootstrap: true\n  job_db_drop: false\n  job_db_init: true\n  job_db_sync: true\n  job_image_repo_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_manage_cleaning_network: 
true\n  job_rabbit_init: true\n  pdb_api: true\n  network_policy: false\n  secret_db: true\n  secret_keystone: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_api: true\n  service_ingress_api: true\n  statefulset_conductor: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "keystone/.helmignore",
    "content": "values_overrides\n"
  },
  {
    "path": "keystone/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Keystone\nname: keystone\nversion: 2025.2.0\nhome: https://docs.openstack.org/keystone/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Keystone/OpenStack_Project_Keystone_vertical.png\nsources:\n  - https://opendev.org/openstack/keystone\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "keystone/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "keystone/templates/bin/_cred-clean.py.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n   http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n#!/usr/bin/python\n\n# Drops db and user for an OpenStack Service:\n# Set ROOT_DB_CONNECTION and DB_CONNECTION environment variables to contain\n# SQLAlchemy strings for the root connection to the database and the one you\n# wish the service to use. Alternatively, you can use an ini formatted config\n# at the location specified by OPENSTACK_CONFIG_FILE, and extract the string\n# from the key OPENSTACK_CONFIG_DB_KEY, in the section specified by\n# OPENSTACK_CONFIG_DB_SECTION.\n\nimport os\nimport sys\ntry:\n    import ConfigParser\n    PARSER_OPTS = {}\nexcept ImportError:\n    import configparser as ConfigParser\n    PARSER_OPTS = {\"strict\": False}\nimport logging\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import text\n\n# Create logger, console handler and formatter\nlogger = logging.getLogger('OpenStack-Helm DB Drop')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\n    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n# Set the formatter and add the handler\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\n# Get the connection string for the service db root user\nif \"ROOT_DB_CONNECTION\" in os.environ:\n    db_connection = os.environ['ROOT_DB_CONNECTION']\n    logger.info('Got DB root connection')\nelse:\n    logger.critical('environment variable ROOT_DB_CONNECTION not set')\n    sys.exit(1)\n\nmysql_x509 = 
os.getenv('MARIADB_X509', \"\")\nssl_args = {}\nif mysql_x509:\n    ssl_args = {'ssl': {'ca': '/etc/mysql/certs/ca.crt',\n                        'key': '/etc/mysql/certs/tls.key',\n                        'cert': '/etc/mysql/certs/tls.crt'}}\n\n# Get the connection string for the service db\nif \"OPENSTACK_CONFIG_FILE\" in os.environ:\n    os_conf = os.environ['OPENSTACK_CONFIG_FILE']\n    if \"OPENSTACK_CONFIG_DB_SECTION\" in os.environ:\n        os_conf_section = os.environ['OPENSTACK_CONFIG_DB_SECTION']\n    else:\n        logger.critical(\n            'environment variable OPENSTACK_CONFIG_DB_SECTION not set')\n        sys.exit(1)\n    if \"OPENSTACK_CONFIG_DB_KEY\" in os.environ:\n        os_conf_key = os.environ['OPENSTACK_CONFIG_DB_KEY']\n    else:\n        logger.critical('environment variable OPENSTACK_CONFIG_DB_KEY not set')\n        sys.exit(1)\n    try:\n        config = ConfigParser.RawConfigParser(**PARSER_OPTS)\n        logger.info(\"Using {0} as db config source\".format(os_conf))\n        config.read(os_conf)\n        logger.info(\"Trying to load db config from {0}:{1}\".format(\n            os_conf_section, os_conf_key))\n        user_db_conn = config.get(os_conf_section, os_conf_key)\n        logger.info(\"Got config from {0}\".format(os_conf))\n    except:\n        logger.critical(\n            \"Tried to load config from {0} but failed.\".format(os_conf))\n        raise\nelif \"DB_CONNECTION\" in os.environ:\n    user_db_conn = os.environ['DB_CONNECTION']\n    logger.info('Got config from DB_CONNECTION env var')\nelse:\n    logger.critical(\n        'Could not get db config, either from config file or env var')\n    sys.exit(1)\n\n# Root DB engine\ntry:\n    root_engine_full = create_engine(db_connection)\n    root_user = root_engine_full.url.username\n    root_password = root_engine_full.url.password\n    drivername = root_engine_full.url.drivername\n    host = root_engine_full.url.host\n    port = root_engine_full.url.port\n    
root_engine_url = ''.join([drivername, '://', root_user, ':',\n                               root_password, '@', host, ':', str(port)])\n    root_engine = create_engine(root_engine_url, connect_args=ssl_args)\n    connection = root_engine.connect()\n    connection.close()\n    logger.info(\"Tested connection to DB @ {0}:{1} as {2}\".format(\n        host, port, root_user))\nexcept:\n    logger.critical('Could not connect to database as root user')\n    raise\n\n# User DB engine\ntry:\n    user_engine = create_engine(user_db_conn, connect_args=ssl_args)\n    # Get our user data out of the user_engine\n    database = user_engine.url.database\n    user = user_engine.url.username\n    password = user_engine.url.password\n    logger.info('Got user db config')\nexcept:\n    logger.critical('Could not get user database config')\n    raise\n\n# Delete all entries from credential table\n\ntry:\n    cmd = text(\"DELETE FROM credential\")\n    with user_engine.connect() as connection:\n        connection.execute(cmd)\n        try:\n            connection.commit()\n        except AttributeError:\n            pass\n    logger.info('Deleted all entries in credential table')\nexcept:\n    logger.critical('Failed to clean up credential table in keystone db')\n    raise\n\nlogger.info('Finished DB Management')\n"
  },
  {
    "path": "keystone/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nkeystone-manage \\\n    --config-file=/etc/keystone/keystone.conf \\\n    --config-dir=/etc/keystone/keystone.conf.d \\\n    db_sync\n\nkeystone-manage \\\n    --config-file=/etc/keystone/keystone.conf \\\n    --config-dir=/etc/keystone/keystone.conf.d \\\n    bootstrap \\\n    --bootstrap-username ${OS_USERNAME} \\\n    --bootstrap-password ${OS_PASSWORD} \\\n    --bootstrap-project-name ${OS_PROJECT_NAME} \\\n    --bootstrap-admin-url ${OS_BOOTSTRAP_ADMIN_URL} \\\n    --bootstrap-public-url ${OS_BOOTSTRAP_PUBLIC_URL} \\\n    --bootstrap-internal-url ${OS_BOOTSTRAP_INTERNAL_URL} \\\n    --bootstrap-region-id ${OS_REGION_NAME}\n"
  },
  {
    "path": "keystone/templates/bin/_domain-manage-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{- range $k, $v := .Values.conf.ks_domains }}\nopenstack --debug domain create --or-show {{ $k }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/bin/_domain-manage.py.tpl",
    "content": "#!/usr/bin/python\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nimport json\nimport os\nimport requests\nimport sys\n\ndef main(args):\n    base_url, token, domainId, filename = args[1], args[2], args[3], args[5]\n    url = \"%s/domains/%s/config\" % (base_url, domainId)\n    print(\"Connecting to url: %r\" % url)\n\n    headers = {\n        'Content-Type': \"application/json\",\n        'X-Auth-Token': token,\n        'Cache-Control': \"no-cache\"\n    }\n\n    verify = os.getenv('OS_CACERT', True)\n\n    response = requests.request(\"GET\", url, headers=headers, verify=verify)\n\n    if response.status_code == 404:\n        print(\"domain config not found - put\")\n        action = \"PUT\"\n    else:\n        print(\"domain config found - patch\")\n        action = \"PATCH\"\n\n    with open(filename, \"rb\") as f:\n        data = {\"config\": json.load(f)}\n\n    response = requests.request(action, url,\n                                data=json.dumps(data),\n                                headers=headers, verify=verify)\n\n    print(\"Response code on action [%s]: %s\" % (action, response.status_code))\n    # Put and Patch can return 200 or 201. If it is not a 2XX code, error out.\n    if (response.status_code // 100) != 2:\n        sys.exit(1)\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 6:\n        sys.exit(1)\n    main(sys.argv)\n"
  },
  {
    "path": "keystone/templates/bin/_domain-manage.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -e\nendpt={{ tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\npath={{ .Values.conf.keystone.identity.domain_config_dir | default \"/etc/keystone/domains\" }}\n\n{{- range $k, $v := .Values.conf.ks_domains }}\n\nfilename=${path}/keystone.{{ $k }}.json\npython /tmp/domain-manage.py \\\n    $endpt \\\n    $(openstack token issue -f value -c id) \\\n    $(openstack domain show {{ $k }} -f value -c id) \\\n    {{ $k }} $filename\n\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/bin/_fernet-manage.py.tpl",
    "content": "#!/usr/bin/env python\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nimport argparse\nimport base64\nimport errno\nimport grp\nimport logging\nimport os\nimport pwd\nimport re\nimport subprocess  #nosec\nimport sys\nimport time\n\nimport requests\n\nFERNET_DIR = os.environ['KEYSTONE_KEYS_REPOSITORY']\nKEYSTONE_USER = os.environ['KEYSTONE_USER']\nKEYSTONE_GROUP = os.environ['KEYSTONE_GROUP']\nNAMESPACE = os.environ['KUBERNETES_NAMESPACE']\n\n# k8s connection data\nKUBE_HOST = None\nKUBE_CERT = '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt'\nKUBE_TOKEN = None\n\nLOG_DATEFMT = \"%Y-%m-%d %H:%M:%S\"\nLOG_FORMAT = \"%(asctime)s.%(msecs)03d - %(levelname)s - %(message)s\"\nlogging.basicConfig(format=LOG_FORMAT, datefmt=LOG_DATEFMT)\nLOG = logging.getLogger(__name__)\nLOG.setLevel(logging.INFO)\n\n\ndef read_kube_config():\n    global KUBE_HOST, KUBE_TOKEN\n    KUBE_HOST = \"https://%s:%s\" % ('kubernetes.default',\n                                   os.environ['KUBERNETES_SERVICE_PORT'])\n    with open('/var/run/secrets/kubernetes.io/serviceaccount/token', 'r') as f:\n        KUBE_TOKEN = f.read()\n\n\ndef get_secret_definition(name):\n    url = '%s/api/v1/namespaces/%s/secrets/%s' % (KUBE_HOST, NAMESPACE, name)\n    resp = requests.get(url,\n                        headers={'Authorization': 'Bearer %s' % KUBE_TOKEN},\n                        verify=KUBE_CERT)\n    if resp.status_code != 200:\n        LOG.error('Cannot get secret %s.', name)\n       
 LOG.error(resp.text)\n        return None\n    return resp.json()\n\n\ndef update_secret(name, secret):\n    url = '%s/api/v1/namespaces/%s/secrets/%s' % (KUBE_HOST, NAMESPACE, name)\n    resp = requests.put(url,\n                        json=secret,\n                        headers={'Authorization': 'Bearer %s' % KUBE_TOKEN},\n                        verify=KUBE_CERT)\n    if resp.status_code != 200:\n        LOG.error('Cannot update secret %s.', name)\n        LOG.error(resp.text)\n        return False\n    return True\n\n\ndef read_from_files():\n    keys = [name for name in os.listdir(FERNET_DIR) if os.path.isfile(FERNET_DIR + name)\n            and re.match(\"^\\d+$\", name)]\n    data = {}\n    for key in keys:\n        with open(FERNET_DIR + key, 'r') as f:\n            data[key] = f.read()\n    if len(list(keys)):\n        LOG.debug(\"Keys read from files: %s\", keys)\n    else:\n        LOG.warning(\"No keys were read from files.\")\n    return data\n\n\ndef get_keys_data():\n    keys = read_from_files()\n    return dict([(key, base64.b64encode(value.encode()).decode())\n                for (key, value) in keys.items()])\n\n\ndef write_to_files(data):\n    if not os.path.exists(os.path.dirname(FERNET_DIR)):\n        try:\n            os.makedirs(os.path.dirname(FERNET_DIR))\n        except OSError as exc: # Guard against race condition\n            if exc.errno != errno.EEXIST:\n                raise\n        uid = pwd.getpwnam(KEYSTONE_USER).pw_uid\n        gid = grp.getgrnam(KEYSTONE_GROUP).gr_gid\n        os.chown(FERNET_DIR, uid, gid)\n\n    for (key, value) in data.items():\n        with open(FERNET_DIR + key, 'w') as f:\n            decoded_value = base64.b64decode(value).decode()\n            f.write(decoded_value)\n            LOG.debug(\"Key %s: %s\", key, decoded_value)\n    LOG.info(\"%s keys were written\", len(data))\n\n\ndef execute_command(cmd):\n    LOG.info(\"Executing 'keystone-manage %s --keystone-user=%s \"\n             
\"--keystone-group=%s' command.\",\n             cmd, KEYSTONE_USER, KEYSTONE_GROUP)\n    subprocess.call(['keystone-manage', cmd,  #nosec\n                     '--keystone-user=%s' % KEYSTONE_USER,\n                     '--keystone-group=%s' % KEYSTONE_GROUP])\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('command', choices=['fernet_setup', 'fernet_rotate',\n                                            'credential_setup',\n                                            'credential_rotate'])\n    args = parser.parse_args()\n\n    is_credential = args.command.startswith('credential')\n\n    SECRET_NAME = ('keystone-credential-keys' if is_credential else\n                   'keystone-fernet-keys')\n\n    read_kube_config()\n    secret = get_secret_definition(SECRET_NAME)\n    if not secret:\n        LOG.error(\"Secret '%s' does not exist.\", SECRET_NAME)\n        sys.exit(1)\n\n    if args.command in ('fernet_rotate', 'credential_rotate'):\n        LOG.info(\"Copying existing %s keys from secret '%s' to %s.\",\n                 'credential' if is_credential else 'fernet', SECRET_NAME,\n                 FERNET_DIR)\n        write_to_files(secret['data'])\n\n    if args.command in ('credential_setup', 'fernet_setup'):\n        if secret.get('data', False):\n            LOG.info('Keys already exist, skipping setup...')\n            sys.exit(0)\n\n    execute_command(args.command)\n\n    LOG.info(\"Updating data for '%s' secret.\", SECRET_NAME)\n    updated_keys = get_keys_data()\n    secret['data'] = updated_keys\n    if not update_secret(SECRET_NAME, secret):\n        sys.exit(1)\n    LOG.info(\"%s fernet keys have been placed to secret '%s'\",\n             len(updated_keys), SECRET_NAME)\n    LOG.debug(\"Placed keys: %s\", updated_keys)\n    LOG.info(\"%s keys %s has been completed\",\n             \"Credential\" if is_credential else 'Fernet',\n             \"rotation\" if args.command.endswith('_rotate') else \"generation\")\n\n    if 
args.command == 'credential_rotate':\n        # `credential_rotate` needs doing `credential_migrate` as well once all\n        # of the nodes have the new keys. So we'll sleep configurable amount of\n        # time to make sure k8s reloads the secrets in all pods and then\n        # execute `credential_migrate`.\n\n        migrate_wait = int(os.getenv('KEYSTONE_CREDENTIAL_MIGRATE_WAIT', \"60\"))\n        LOG.info(\"Waiting %d seconds to execute `credential_migrate`.\",\n                 migrate_wait)\n        time.sleep(migrate_wait)\n\n        execute_command('credential_migrate')\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "keystone/templates/bin/_keystone-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  {{- if .Values.conf.software.apache2.a2enmod }}\n    {{- range .Values.conf.software.apache2.a2enmod }}\n  a2enmod {{ . }}\n    {{- end }}\n  {{- end }}\n\n  {{- if .Values.conf.software.apache2.a2dismod }}\n    {{- range .Values.conf.software.apache2.a2dismod }}\n  a2dismod {{ . }}\n    {{- end }}\n  {{- end }}\n\n  if [ -f /etc/apache2/envvars ]; then\n     # Loading Apache2 ENV variables\n     source /etc/apache2/envvars\n  fi\n\n  if [ -f /var/run/apache2/apache2.pid ]; then\n     # Remove the stale pid for debian/ubuntu images\n     rm -f /var/run/apache2/apache2.pid\n  fi\n\n  # Start Apache2\n  exec {{ .Values.conf.software.apache2.binary }} {{ .Values.conf.software.apache2.start_parameters }}\n}\n\nfunction stop () {\n  if [ -f /etc/apache2/envvars ]; then\n     # Loading Apache2 ENV variables\n     source /etc/apache2/envvars\n  fi\n  {{ .Values.conf.software.apache2.binary }} -k graceful-stop\n}\n\n$COMMAND\n"
  },
  {
    "path": "keystone/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.certificates .Values.secrets.tls.identity.api.internal -}}\n{{ dict \"envAll\" . \"service\" \"identity\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "keystone/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $rallyTests := .Values.conf.rally_tests }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: keystone-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  rally-test.sh: |\n{{ tuple $rallyTests | include \"helm-toolkit.scripts.rally_test\" | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  cred-clean.py: |\n{{ tuple \"bin/_cred-clean.py.tpl\" . | include \"helm-toolkit.utils.template\" |indent 4}}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.wsgi_script \"key\" \"wsgi.py\" \"format\" \"ConfigMap\" ) | indent 2 }}\n  keystone-api.sh: |\n{{ tuple \"bin/_keystone-api.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  fernet-manage.py: |\n{{ tuple \"bin/_fernet-manage.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  domain-manage-init.sh: |\n{{ tuple \"bin/_domain-manage-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  domain-manage.sh: |\n{{ tuple \"bin/_domain-manage.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  domain-manage.py: |\n{{ tuple \"bin/_domain-manage.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.keystone.database.connection)) (empty .Values.conf.keystone.database.connection) -}}\n{{- $connection := tuple \"oslo_db\" \"internal\" \"keystone\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if and .Values.manifests.certificates .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.keystone.database \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.keystone.database \"connection\" $connection -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.keystone.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"keystone\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.keystone.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.keystone.cache.memcache_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.keystone.cache \"memcache_servers\" -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .deployment_name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: keystone-etc\ntype: Opaque\ndata:\n  rally_tests.yaml: {{ toYaml .Values.conf.rally_tests.tests | b64enc }}\n  keystone.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.keystone | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.logging | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n  access_rules.json: {{ toJson .Values.conf.access_rules | b64enc }}\n  ports.conf: ''\n{{- range $k, $v := .Values.conf.ks_domains }}\n  keystone.{{ $k }}.json: {{ toJson $v | b64enc }}\n{{- end }}\n{{- if .Values.conf.security }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.security \"key\" \"security.conf\" \"format\" \"Secret\" ) | indent 2 
}}\n{{- end}}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.mpm_event \"key\" \"mpm_event.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.wsgi_keystone \"key\" \"wsgi-keystone.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.sso_callback_template \"key\" \"sso_callback_template.html\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/cron-job-credential-rotate.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_credential_rotate }}\n{{- $envAll := . }}\n\n{{- $mounts_keystone_credential_rotate := .Values.pod.mounts.keystone_credential_rotate.keystone_credential_rotate }}\n{{- $mounts_keystone_credential_rotate_init := .Values.pod.mounts.keystone_credential_rotate.init_container }}\n\n{{- $serviceAccountName := \"keystone-credential-rotate\" }}\n{{ tuple $envAll \"credential_rotate\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.keystone_credential_rotate }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - list\n      - create\n      - update\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: keystone-credential-rotate\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  schedule: {{ .Values.jobs.credential_rotate.cron | quote }}\n  
successfulJobsHistoryLimit: {{ .Values.jobs.credential_rotate.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.credential_rotate.history.failed }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"keystone\" \"credential-rotate\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"keystone\" \"credential-rotate\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n{{ tuple \"keystone_credential_rotate\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 10 }}\n{{ tuple \"keystone_credential_rotate\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 10 }}\n          serviceAccountName: {{ $serviceAccountName }}\n          initContainers:\n{{ tuple $envAll \"credential_rotate\" $mounts_keystone_credential_rotate_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.keystone.enabled }}\n{{ tuple $envAll \"keystone\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n          containers:\n            - name: keystone-credential-rotate\n{{ tuple $envAll \"keystone_credential_rotate\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.credential_rotate | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n              env:\n                - name: KEYSTONE_USER\n                  value: {{ .Values.jobs.credential_rotate.user | quote }}\n                - name: KEYSTONE_GROUP\n                  value: {{ .Values.jobs.credential_rotate.group | quote }}\n               
 - name: KUBERNETES_NAMESPACE\n                  value: {{ .Release.Namespace | quote }}\n                - name: KEYSTONE_KEYS_REPOSITORY\n                  value: {{ .Values.conf.keystone.credential.key_repository | quote }}\n                - name: KEYSTONE_CREDENTIAL_MIGRATE_WAIT\n                  value: {{ .Values.jobs.credential_rotate.migrate_wait | quote }}\n              command:\n                - python\n                - /tmp/fernet-manage.py\n                - credential_rotate\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - name: etckeystone\n                  mountPath: /etc/keystone\n                - name: keystone-etc\n                  mountPath: /etc/keystone/keystone.conf\n                  subPath: keystone.conf\n                  readOnly: true\n                - name: keystone-etc-snippets\n                  mountPath: /etc/keystone/keystone.conf.d/\n                  readOnly: true\n                {{- if .Values.conf.keystone.DEFAULT.log_config_append }}\n                - name: keystone-etc\n                  mountPath: {{ .Values.conf.keystone.DEFAULT.log_config_append }}\n                  subPath: {{ base .Values.conf.keystone.DEFAULT.log_config_append }}\n                  readOnly: true\n                {{- end }}\n                - name: keystone-bin\n                  mountPath: /tmp/fernet-manage.py\n                  subPath: fernet-manage.py\n                  readOnly: true\n{{ if $mounts_keystone_credential_rotate.volumeMounts }}{{ toYaml $mounts_keystone_credential_rotate.volumeMounts | indent 16 }}{{ end }}\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: etckeystone\n              emptyDir: {}\n            - name: keystone-etc\n              secret:\n                secretName: keystone-etc\n                defaultMode: 0444\n            - name: keystone-etc-snippets\n{{- if $etcSources }}\n              
projected:\n                sources:\n{{ toYaml $etcSources | indent 18 }}\n{{- else }}\n              emptyDir: {}\n{{ end }}\n            - name: keystone-bin\n              configMap:\n                name: keystone-bin\n                defaultMode: 0555\n{{ if $mounts_keystone_credential_rotate.volumes }}{{ toYaml $mounts_keystone_credential_rotate.volumes | indent 12 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/cron-job-fernet-rotate.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_fernet_rotate }}\n{{- if eq .Values.conf.keystone.token.provider \"fernet\" }}\n{{- $envAll := . }}\n\n{{- $mounts_keystone_fernet_rotate := .Values.pod.mounts.keystone_fernet_rotate.keystone_fernet_rotate }}\n{{- $mounts_keystone_fernet_rotate_init := .Values.pod.mounts.keystone_fernet_rotate.init_container }}\n\n{{- $serviceAccountName := \"keystone-fernet-rotate\" }}\n{{ tuple $envAll \"fernet_rotate\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.keystone_fernet_rotate }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - list\n      - create\n      - update\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: keystone-fernet-rotate\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  schedule: {{ .Values.jobs.fernet_rotate.cron | quote }}\n  
successfulJobsHistoryLimit: {{ .Values.jobs.fernet_rotate.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.fernet_rotate.history.failed }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"keystone\" \"fernet-rotate\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"keystone\" \"fernet-rotate\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n{{ tuple \"keystone_fernet_rotate\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 10 }}\n{{ tuple \"keystone_fernet_rotate\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 10 }}\n          serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"fernet_rotate\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n          initContainers:\n{{ tuple $envAll \"fernet_rotate\" $mounts_keystone_fernet_rotate_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.keystone.enabled }}\n{{ tuple $envAll \"keystone\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n          containers:\n            - name: keystone-fernet-rotate\n{{ tuple $envAll \"keystone_fernet_rotate\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.fernet_rotate | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"fernet_rotate\" \"container\" \"keystone_fernet_rotate\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14}}\n              env:\n                - name: KEYSTONE_USER\n                  value: {{ .Values.jobs.fernet_rotate.user | quote }}\n                - name: KEYSTONE_GROUP\n                  value: {{ .Values.jobs.fernet_rotate.group | quote }}\n                - name: KUBERNETES_NAMESPACE\n                  value: {{ .Release.Namespace | quote }}\n                - name: KEYSTONE_KEYS_REPOSITORY\n                  value: {{ .Values.conf.keystone.fernet_tokens.key_repository | quote }}\n              command:\n                - python\n                - /tmp/fernet-manage.py\n                - fernet_rotate\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - name: etckeystone\n                  mountPath: /etc/keystone\n                - name: keystone-etc\n                  mountPath: /etc/keystone/keystone.conf\n                  subPath: keystone.conf\n                  readOnly: true\n                - name: keystone-etc-snippets\n                  mountPath: /etc/keystone/keystone.conf.d/\n                  readOnly: true\n                {{- if .Values.conf.keystone.DEFAULT.log_config_append }}\n                - name: keystone-etc\n                  mountPath: {{ .Values.conf.keystone.DEFAULT.log_config_append }}\n                  subPath: {{ base .Values.conf.keystone.DEFAULT.log_config_append }}\n                  readOnly: true\n                {{- end }}\n                - name: keystone-bin\n                  mountPath: /tmp/fernet-manage.py\n                  subPath: fernet-manage.py\n                  readOnly: true\n{{ if $mounts_keystone_fernet_rotate.volumeMounts }}{{ toYaml $mounts_keystone_fernet_rotate.volumeMounts | indent 16 }}{{ end }}\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: etckeystone\n              emptyDir: {}\n            - name: 
keystone-etc\n              secret:\n                secretName: keystone-etc\n                defaultMode: 0444\n            - name: keystone-etc-snippets\n{{- if $etcSources }}\n              projected:\n                sources:\n{{ toYaml $etcSources | indent 18 }}\n{{- else }}\n              emptyDir: {}\n{{ end }}\n            - name: keystone-bin\n              configMap:\n                name: keystone-bin\n                defaultMode: 0555\n{{ if $mounts_keystone_fernet_rotate.volumes }}{{ toYaml $mounts_keystone_fernet_rotate.volumes | indent 12 }}{{ end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"apiProbeTemplate\" }}\nhttpGet:\n  scheme: {{ tuple \"identity\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: {{ tuple \"identity\" \"healthcheck\" \"internal\" . | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" }}\n  port: {{ tuple \"identity\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . 
}}\n\n{{- $mounts_keystone_api := .Values.pod.mounts.keystone_api.keystone_api }}\n{{- $mounts_keystone_api_init := .Values.pod.mounts.keystone_api.init_container }}\n\n{{- $serviceAccountName := \"keystone-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.keystone_api }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: keystone-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"keystone\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"keystone\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"keystone\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"keystone_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"keystone-api\" \"containerNames\" (list \"keystone-api\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"keystone\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"keystone_api\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"keystone_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"keystone\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.keystone.enabled }}\n{{ tuple $envAll \"keystone\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_keystone_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: keystone-api\n{{ tuple $envAll \"keystone_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"keystone\" \"container\" \"keystone_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/keystone-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/keystone-api.sh\n                  - stop\n          ports:\n            - name: ks-pub\n              containerPort: {{ tuple \"identity\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"api\" \"type\" \"readiness\" \"probeTemplate\" (include \"apiProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"api\" \"type\" \"liveness\" \"probeTemplate\" (include \"apiProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.keystone.oslo_concurrency.lock_path }}\n            - name: etckeystone\n              mountPath: /etc/keystone\n            - name: logs-apache\n              mountPath: /var/log/apache2\n            - name: run-apache\n              mountPath: /var/run/apache2\n            - name: keystone-bin\n              mountPath: /var/www/cgi-bin/keystone/wsgi.py\n              subPath: wsgi.py\n              readOnly: true\n            - name: keystone-etc\n              mountPath: /etc/keystone/keystone.conf\n              subPath: keystone.conf\n              readOnly: true\n            - name: keystone-etc-snippets\n              mountPath: /etc/keystone/keystone.conf.d/\n              readOnly: true\n            - name: keystone-etc\n              mountPath: /etc/apache2/ports.conf\n              subPath: ports.conf\n              readOnly: true\n            {{- if .Values.conf.keystone.DEFAULT.log_config_append }}\n            - name: keystone-etc\n              mountPath: {{ .Values.conf.keystone.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.keystone.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: keystone-etc\n              mountPath: /etc/keystone/policy.yaml\n              subPath: policy.yaml\n  
            readOnly: true\n            - name: keystone-etc\n              mountPath: /etc/keystone/access_rules.json\n              subPath: access_rules.json\n              readOnly: true\n            - name: keystone-etc\n              mountPath: /etc/keystone/sso_callback_template.html\n              subPath: sso_callback_template.html\n              readOnly: true\n            - name: keystone-etc\n              mountPath: {{ .Values.conf.software.apache2.conf_dir }}/wsgi-keystone.conf\n              subPath: wsgi-keystone.conf\n              readOnly: true\n            - name: keystone-etc\n              mountPath: {{ .Values.conf.software.apache2.mods_dir }}/mpm_event.conf\n              subPath: mpm_event.conf\n              readOnly: true\n{{- if .Values.conf.security }}\n            - name: keystone-etc\n              mountPath: {{ .Values.conf.software.apache2.conf_dir }}/security.conf\n              subPath: security.conf\n              readOnly: true\n{{- end }}\n            - name: keystone-bin\n              mountPath: /tmp/keystone-api.sh\n              subPath: keystone-api.sh\n              readOnly: true\n{{- if .Values.endpoints.ldap.auth.client.tls.ca }}\n            - name: keystone-ldap-tls\n              mountPath: /etc/keystone/ldap/tls.ca\n              subPath: tls.ca\n              readOnly: true\n{{- end }}\n{{- if eq .Values.conf.keystone.token.provider \"fernet\" }}\n            - name: keystone-fernet-keys\n              mountPath: {{ .Values.conf.keystone.fernet_tokens.key_repository }}\n{{- end }}\n            - name: keystone-credential-keys\n              mountPath: {{ .Values.conf.keystone.credential.key_repository }}\n{{- dict \"enabled\" .Values.tls.oslo_db \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" .Values.tls.identity \"name\" .Values.secrets.tls.identity.api.internal \"path\" 
\"/etc/keystone/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.tls.oslo_messaging \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n\n{{ if $mounts_keystone_api.volumeMounts }}{{ toYaml $mounts_keystone_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: etckeystone\n          emptyDir: {}\n        - name: logs-apache\n          emptyDir: {}\n        - name: run-apache\n          emptyDir: {}\n        - name: keystone-etc\n          secret:\n            secretName: keystone-etc\n            defaultMode: 0444\n        - name: keystone-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: keystone-bin\n          configMap:\n            name: keystone-bin\n            defaultMode: 0555\n{{- if .Values.endpoints.ldap.auth.client.tls.ca }}\n        - name: keystone-ldap-tls\n          secret:\n            secretName: keystone-ldap-tls\n{{- end }}\n{{- if eq .Values.conf.keystone.token.provider \"fernet\" }}\n        - name: keystone-fernet-keys\n          secret:\n            secretName: keystone-fernet-keys\n{{- end }}\n        - name: keystone-credential-keys\n          secret:\n            secretName: keystone-credential-keys\n{{- dict \"enabled\" .Values.tls.oslo_db \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" .Values.tls.identity \"name\" .Values.secrets.tls.identity.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.tls.oslo_messaging \"name\" 
$envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n\n{{ if $mounts_keystone_api.volumes }}{{ toYaml $mounts_keystone_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "keystone/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendServiceType\" \"identity\" \"backendPort\" \"ks-pub\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.identity.api.internal -}}\n{{- if and .Values.manifests.certificates $secretName -}}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.identity.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.bootstrap\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"5\"\n{{- end }}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"keystone\" \"keystoneUser\" .Values.bootstrap.ks_user \"logConfigFile\" .Values.conf.keystone.DEFAULT.log_config_append \"jobAnnotations\" (include \"metadata.annotations.job.bootstrap\" . | fromYaml) -}}\n{{- if and ( or .Values.manifests.certificates .Values.tls.identity) .Values.secrets.tls.identity.api.internal -}}\n{{- $_ := set $bootstrapJob \"tlsSecret\" .Values.secrets.tls.identity.api.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.keystone.enabled -}}\n{{- $_ := set $bootstrapJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{- if and .Values.jobs.bootstrap .Values.jobs.bootstrap.backoffLimit }}\n{{- $_ := set $bootstrapJob \"backoffLimit\" .Values.jobs.bootstrap.backoffLimit -}}\n{{- end }}\n{{- if and .Values.jobs.bootstrap .Values.jobs.bootstrap.activeDeadlineSeconds }}\n{{- $_ := set $bootstrapJob \"activeDeadlineSeconds\" .Values.jobs.bootstrap.activeDeadlineSeconds -}}\n{{- end }}\n\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/job-credential-cleanup.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_credential_cleanup }}\n{{- $envAll := index . -}}\n\n{{- $serviceName := \"keystone\" -}}\n{{- $nodeSelector := index . \"nodeSelector\" | default ( dict $envAll.Values.labels.job.node_selector_key $envAll.Values.labels.job.node_selector_value ) -}}\n{{- $configMapBin := \"keystone-bin\" -}}\n{{- $configMapEtc := \"keystone-etc\" -}}\n{{- $dbToClean := index . \"dbToClean\" | default ( dict \"adminSecret\" $envAll.Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"database\" \"configDbKey\" \"connection\" ) -}}\n\n{{ tuple $envAll \"credential_cleanup\" $serviceName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: \"keystone-credential-cleanup\"\n  labels:\n{{ tuple $envAll \"keystone\" \"credential-cleanup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": pre-delete\n    \"helm.sh/hook-delete-policy\": hook-succeeded, hook-failed\n{{ tuple \"keystone_credential_cleanup\" $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll $serviceName \"credential-cleanup\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"keystone-credential-cleanup\" \"containerNames\" (list \"keystone-credential-cleanup\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"keystone_credential_cleanup\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"keystone_credential_cleanup\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceName }}\n      restartPolicy: Never\n{{ if $envAll.Values.pod.tolerations.keystone.enabled }}\n{{ tuple $envAll \"keystone\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n      initContainers:\n{{ tuple $envAll \"credential_cleanup\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n{{ $dbToCleanType := default \"oslo\" $dbToClean.inputType }}\n        - name: {{ printf \"%s-%s\" $serviceName \"credential-cleanup\" | quote }}\n          image: {{ $envAll.Values.images.tags.keystone_credential_cleanup }}\n          imagePullPolicy: {{ $envAll.Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_drop | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: ROOT_DB_CONNECTION\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $dbToClean.adminSecret | quote }}\n                  key: DB_CONNECTION\n{{- if eq 
$dbToCleanType \"oslo\" }}\n            - name: OPENSTACK_CONFIG_FILE\n              value: {{ $dbToClean.configFile | quote }}\n            - name: OPENSTACK_CONFIG_DB_SECTION\n              value: {{ $dbToClean.configDbSection | quote }}\n            - name: OPENSTACK_CONFIG_DB_KEY\n              value: {{ $dbToClean.configDbKey | quote }}\n{{- end }}\n{{- if and $envAll.Values.manifests.certificates $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal }}\n            - name: MARIADB_X509\n              value: \"REQUIRE X509\"\n{{- end }}\n          command:\n            - python\n            - /tmp/cred-clean.py\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: cred-clean-sh\n              mountPath: /tmp/cred-clean.py\n              subPath: cred-clean.py\n              readOnly: true\n{{- if eq $dbToCleanType \"oslo\" }}\n            - name: etc-service\n              mountPath: {{ dir $dbToClean.configFile | quote }}\n            - name: cred-clean-conf\n              mountPath: {{ $dbToClean.configFile | quote }}\n              subPath: {{ base $dbToClean.configFile | quote }}\n              readOnly: true\n            - name: cred-clean-conf\n              mountPath: {{ $dbToClean.logConfigFile | quote }}\n              subPath: {{ base $dbToClean.logConfigFile | quote }}\n              readOnly: true\n{{- end }}\n{{- if and $envAll.Values.manifests.certificates $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: cred-clean-sh\n          configMap:\n            name: \"keystone-bin\"\n            defaultMode: 0555\n{{- if and 
$envAll.Values.manifests.certificates $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n{{- $local := dict \"configMapBinFirst\" true -}}\n{{- $dbToCleanType := default \"oslo\" $dbToClean.inputType }}\n{{- if and (eq $dbToCleanType \"oslo\") $local.configMapBinFirst }}\n{{- $_ := set $local \"configMapBinFirst\" false }}\n        - name: etc-service\n          emptyDir: {}\n        - name: cred-clean-conf\n          secret:\n            secretName: \"keystone-etc\"\n            defaultMode: 0444\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "keystone/templates/job-credential-setup.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_credential_setup }}\n{{- $envAll := . }}\n\n{{- $mounts_keystone_credential_setup := .Values.pod.mounts.keystone_credential_setup.keystone_credential_setup }}\n{{- $mounts_keystone_credential_setup_init := .Values.pod.mounts.keystone_credential_setup.init_container }}\n\n{{- $serviceAccountName := \"keystone-credential-setup\" }}\n{{ tuple $envAll \"credential_setup\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.keystone_credential_setup }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - list\n      - create\n      - update\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: keystone-credential-setup\n  labels:\n{{ tuple $envAll \"keystone\" \"credential-setup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": 
post-install,post-upgrade\n    \"helm.sh/hook-weight\": \"-5\"\n    \"helm.sh/hook-delete-policy\": before-hook-creation\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ tuple \"keystone_credential_setup\" $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"keystone\" \"credential-setup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"keystone-credential-setup\" \"containerNames\" (list \"keystone-credential-setup\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"keystone_credential_setup\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"keystone_credential_setup\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"credential_setup\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      initContainers:\n{{ tuple $envAll \"credential_setup\" $mounts_keystone_credential_setup_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.keystone.enabled }}\n{{ tuple $envAll \"keystone\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      containers:\n        - name: keystone-credential-setup\n{{ tuple $envAll \"keystone_credential_setup\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.credential_setup | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"credential_setup\" \"container\" \"keystone_credential_setup\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: KEYSTONE_USER\n              value: {{ .Values.jobs.credential_setup.user | quote }}\n            - name: KEYSTONE_GROUP\n              value: {{ .Values.jobs.credential_setup.group | quote }}\n            - name: KUBERNETES_NAMESPACE\n              value: {{ .Release.Namespace | quote }}\n            - name: KEYSTONE_KEYS_REPOSITORY\n              value: {{ .Values.conf.keystone.credential.key_repository | quote }}\n          command:\n            - python\n            - /tmp/fernet-manage.py\n            - credential_setup\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etckeystone\n              
mountPath: /etc/keystone\n            - name: credential-keys\n              mountPath: {{ .Values.conf.keystone.credential.key_repository | quote }}\n            - name: keystone-etc\n              mountPath: /etc/keystone/keystone.conf\n              subPath: keystone.conf\n              readOnly: true\n            - name: keystone-etc-snippets\n              mountPath: /etc/keystone/keystone.conf.d/\n              readOnly: true\n            {{- if .Values.conf.keystone.DEFAULT.log_config_append }}\n            - name: keystone-etc\n              mountPath: {{ .Values.conf.keystone.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.keystone.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: keystone-bin\n              mountPath: /tmp/fernet-manage.py\n              subPath: fernet-manage.py\n              readOnly: true\n{{ if $mounts_keystone_credential_setup.volumeMounts }}{{ toYaml $mounts_keystone_credential_setup.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: etckeystone\n          emptyDir: {}\n        - name: credential-keys\n          emptyDir: {}\n        - name: keystone-etc\n          secret:\n            secretName: keystone-etc\n            defaultMode: 0444\n        - name: keystone-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: keystone-bin\n          configMap:\n            name: keystone-bin\n            defaultMode: 0555\n{{ if $mounts_keystone_credential_setup.volumes }}{{ toYaml $mounts_keystone_credential_setup.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"keystone\" -}}\n{{- if and .Values.manifests.certificates .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- $_ := set $dbDropJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.keystone.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"keystone\" \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) -}}\n{{- if and .Values.manifests.certificates .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.keystone.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- define \"keystone.templates._job_db_sync.env_vars\" -}}\n{{- $envAll := index . 0 -}}\nenv:\n  - name: OS_BOOTSTRAP_ADMIN_URL\n    value: {{ tuple \"identity\" \"admin\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\n  - name: OS_BOOTSTRAP_INTERNAL_URL\n    value: {{ tuple \"identity\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\n  - name: OS_BOOTSTRAP_PUBLIC_URL\n    value: {{ tuple \"identity\" \"public\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 2}}\n{{- end }}\n{{- end }}\n\n{{- define \"keystone.templates._job_db_sync.pod_vol_mounts\" -}}\n{{- $envAll := index . 
0 -}}\nvolumeMounts:\n  - name: keystone-fernet-keys\n    mountPath: {{ $envAll.Values.conf.keystone.fernet_tokens.key_repository }}\n    readOnly: true\n{{- if and $envAll.Values.manifests.certificates $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 2 }}\n{{- end }}\n{{- if and $envAll.Values.manifests.certificates $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 2 }}\n{{- end }}\n{{- end }}\n\n{{- define \"keystone.templates._job_db_sync.pod_vols\" -}}\n{{- $envAll := index . 0 -}}\nvolumes:\n  - name: keystone-fernet-keys\n    secret:\n      secretName: keystone-fernet-keys\n{{- if and $envAll.Values.manifests.certificates $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 2 }}\n{{- end }}\n{{- if and $envAll.Values.manifests.certificates $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 2 }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $local := dict \"podVolMounts\" false \"podVols\" false -}}\n{{- if eq .Values.conf.keystone.token.provider \"fernet\" }}\n{{- $_ := set $local \"podVolMounts\" ( index ( tuple . 
| include \"keystone.templates._job_db_sync.pod_vol_mounts\" | toString | fromYaml ) \"volumeMounts\" ) }}\n{{- $_ := set $local \"podVols\" ( index ( tuple . | include \"keystone.templates._job_db_sync.pod_vols\" | toString | fromYaml ) \"volumes\" ) }}\n{{- end }}\n{{- $podEnvVars := tuple . | include \"keystone.templates._job_db_sync.env_vars\" | toString | fromYaml }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"keystone\" \"podVolMounts\" $local.podVolMounts \"podVols\" $local.podVols \"podEnvVars\" $podEnvVars.env \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.keystone.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/job-domain-manage.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_domain_manage }}\n{{- $envAll := . }}\n\n{{- $mounts_keystone_domain_manage := .Values.pod.mounts.keystone_domain_manage.keystone_domain_manage }}\n{{- $mounts_keystone_domain_manage_init := .Values.pod.mounts.keystone_domain_manage.init_container }}\n\n{{- $serviceAccountName := \"keystone-domain-manage\" }}\n{{ tuple $envAll \"domain_manage\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.keystone_domain_manage }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: keystone-domain-manage\n  labels:\n{{ tuple $envAll \"keystone\" \"domain-manage\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": post-install,post-upgrade\n    \"helm.sh/hook-delete-policy\": before-hook-creation\n{{ tuple $serviceAccountName $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"keystone\" \"domain-manage\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"keystone-domain-manage\" \"containerNames\" (list \"keystone-domain-manage\" \"keystone-domain-manage-init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"keystone_domain_manage\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"keystone_domain_manage\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"domain_manage\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.keystone.enabled }}\n{{ tuple $envAll \"keystone\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"domain_manage\" $mounts_keystone_domain_manage_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n        - name: keystone-domain-manage-init\n{{ tuple $envAll \"bootstrap\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.domain_manage | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"domain_manage\" \"container\" \"keystone_domain_manage_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" (and .Values.manifests.certificates .Values.secrets.tls.identity.api.internal) }}\n{{- 
include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          command:\n            - /tmp/domain-manage-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: keystone-bin\n              mountPath: /tmp/domain-manage-init.sh\n              subPath: domain-manage-init.sh\n              readOnly: true\n{{- if and .Values.manifests.certificates .Values.secrets.tls.identity.api.internal }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.identity.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- end }}\n      containers:\n        - name: keystone-domain-manage\n{{ tuple $envAll \"keystone_domain_manage\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.domain_manage | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"domain_manage\" \"container\" \"keystone_domain_manage\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" (and .Values.manifests.certificates .Values.secrets.tls.identity.api.internal) }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          command:\n            - /tmp/domain-manage.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etckeystonedomains\n              mountPath: {{ .Values.conf.keystone.identity.domain_config_dir | default \"/etc/keystone/domains\" }}\n            - name: etckeystone\n              mountPath: /etc/keystone\n            - name: keystone-bin\n              mountPath: /tmp/domain-manage.sh\n              subPath: domain-manage.sh\n              readOnly: true\n            - name: keystone-bin\n 
             mountPath: /tmp/domain-manage.py\n              subPath: domain-manage.py\n              readOnly: true\n            - name: keystone-etc\n              mountPath: /etc/keystone/keystone.conf\n              subPath: keystone.conf\n              readOnly: true\n            - name: keystone-etc-snippets\n              mountPath: /etc/keystone/keystone.conf.d/\n              readOnly: true\n            {{- if .Values.conf.keystone.DEFAULT.log_config_append }}\n            - name: keystone-etc\n              mountPath: {{ .Values.conf.keystone.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.keystone.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{- range $k, $v := .Values.conf.ks_domains }}\n            - name: keystone-etc\n              mountPath: {{ $envAll.Values.conf.keystone.identity.domain_config_dir | default \"/etc/keystone/domains\" }}/keystone.{{ $k }}.json\n              subPath: keystone.{{ $k }}.json\n              readOnly: true\n{{- end }}\n{{- if eq .Values.conf.keystone.token.provider \"fernet\" }}\n            - name: keystone-fernet-keys\n              mountPath: {{ .Values.conf.keystone.fernet_tokens.key_repository }}\n{{- end }}\n            - name: keystone-credential-keys\n              mountPath: {{ .Values.conf.keystone.credential.key_repository }}\n{{- if and .Values.manifests.certificates .Values.secrets.tls.identity.api.internal }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.identity.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- end }}\n{{ if $mounts_keystone_domain_manage.volumeMounts }}{{ toYaml $mounts_keystone_domain_manage.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: etckeystone\n          emptyDir: {}\n        - name: etckeystonedomains\n          emptyDir: {}\n        - name: keystone-etc\n          secret:\n 
           secretName: keystone-etc\n            defaultMode: 0444\n        - name: keystone-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: keystone-bin\n          configMap:\n            name: keystone-bin\n            defaultMode: 0555\n{{- if eq .Values.conf.keystone.token.provider \"fernet\" }}\n        - name: keystone-fernet-keys\n          secret:\n            secretName: keystone-fernet-keys\n{{- end }}\n        - name: keystone-credential-keys\n          secret:\n            secretName: keystone-credential-keys\n{{- if and .Values.manifests.certificates .Values.secrets.tls.identity.api.internal }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.identity.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n{{ if $mounts_keystone_domain_manage.volumes }}{{ toYaml $mounts_keystone_domain_manage.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/job-fernet-setup.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_fernet_setup }}\n{{- if eq .Values.conf.keystone.token.provider \"fernet\" }}\n{{- $envAll := . }}\n\n{{- $mounts_keystone_fernet_setup := .Values.pod.mounts.keystone_fernet_setup.keystone_fernet_setup }}\n{{- $mounts_keystone_fernet_setup_init := .Values.pod.mounts.keystone_fernet_setup.init_container }}\n\n{{- $serviceAccountName := \"keystone-fernet-setup\" }}\n{{ tuple $envAll \"fernet_setup\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.keystone_fernet_setup }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - list\n      - create\n      - update\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: keystone-fernet-setup\n  labels:\n{{ tuple $envAll \"keystone\" \"fernet-setup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": 
post-install,post-upgrade\n    \"helm.sh/hook-weight\": \"-5\"\n    \"helm.sh/hook-delete-policy\": before-hook-creation\n{{ tuple \"keystone_fernet_setup\" $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"keystone\" \"fernet-setup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"keystone-fernet-setup\" \"containerNames\" (list \"keystone-fernet-setup\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"keystone_fernet_setup\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"keystone_fernet_setup\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"fernet_setup\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      initContainers:\n{{ tuple $envAll \"fernet_setup\" $mounts_keystone_fernet_setup_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.keystone.enabled }}\n{{ tuple $envAll \"keystone\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      containers:\n        - name: keystone-fernet-setup\n{{ tuple $envAll \"keystone_fernet_setup\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.fernet_setup | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"fernet_setup\" \"container\" \"keystone_fernet_setup\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: KEYSTONE_USER\n              value: {{ .Values.jobs.fernet_setup.user | quote }}\n            - name: KEYSTONE_GROUP\n              value: {{ .Values.jobs.fernet_setup.group | quote }}\n            - name: KUBERNETES_NAMESPACE\n              value: {{ .Release.Namespace | quote }}\n            - name: KEYSTONE_KEYS_REPOSITORY\n              value: {{ .Values.conf.keystone.fernet_tokens.key_repository | quote }}\n          command:\n            - python\n            - /tmp/fernet-manage.py\n            - fernet_setup\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etckeystone\n              mountPath: /etc/keystone\n            - 
name: fernet-keys\n              mountPath: {{ .Values.conf.keystone.fernet_tokens.key_repository | quote }}\n            - name: keystone-etc\n              mountPath: /etc/keystone/keystone.conf\n              subPath: keystone.conf\n              readOnly: true\n            - name: keystone-etc-snippets\n              mountPath: /etc/keystone/keystone.conf.d/\n              readOnly: true\n            {{- if .Values.conf.keystone.DEFAULT.log_config_append }}\n            - name: keystone-etc\n              mountPath: {{ .Values.conf.keystone.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.keystone.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: keystone-bin\n              mountPath: /tmp/fernet-manage.py\n              subPath: fernet-manage.py\n              readOnly: true\n{{ if $mounts_keystone_fernet_setup.volumeMounts }}{{ toYaml $mounts_keystone_fernet_setup.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: etckeystone\n          emptyDir: {}\n        - name: fernet-keys\n          emptyDir: {}\n        - name: keystone-etc\n          secret:\n            secretName: keystone-etc\n            defaultMode: 0444\n        - name: keystone-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: keystone-bin\n          configMap:\n            name: keystone-bin\n            defaultMode: 0555\n{{ if $mounts_keystone_fernet_setup.volumes }}{{ toYaml $mounts_keystone_fernet_setup.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"keystone\" \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.keystone.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"keystone\" \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) -}}\n{{- if and .Values.tls.oslo_messaging .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal -}}\n{{- $_ := set $rmqUserJob \"tlsSecret\" .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.keystone.enabled -}}\n{{- $_ := set $rmqUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/network_policy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"keystone\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "keystone/templates/pdb.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: keystone-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"keystone\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/pod-rally-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.pod_rally_test }}\n{{- $envAll := . }}\n\n{{- $mounts_tests := .Values.pod.mounts.keystone_tests.keystone_tests }}\n{{- $mounts_tests_init := .Values.pod.mounts.keystone_tests.init_container }}\n\n{{- $serviceAccountName := print $envAll.deployment_name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ print $envAll.deployment_name \"-test\" }}\n  labels:\n{{ tuple $envAll \"keystone\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"keystone-test\" \"containerNames\" (list \"init\" \"keystone-test\" \"keystone-test-ks-user\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n  restartPolicy: Never\n{{ dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ 
.Values.labels.test.node_selector_value }}\n{{ tuple \"keystone_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 2 }}\n{{ tuple \"keystone_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n    - name: keystone-test-ks-user\n{{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"keystone_test_ks_user\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      command:\n        - /tmp/ks-user.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: keystone-bin\n          mountPath: /tmp/ks-user.sh\n          subPath: ks-user.sh\n          readOnly: true\n{{- if and .Values.manifests.certificates .Values.secrets.tls.identity.api.internal }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.identity.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 8 }}\n{{- end }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" (and .Values.manifests.certificates .Values.secrets.tls.identity.api.internal) }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_SERVICE_NAME\n          value: \"test\"\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_ROLE\n          value: {{ 
.Values.endpoints.identity.auth.test.role | quote }}\n  containers:\n    - name: keystone-test\n{{ tuple $envAll \"test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"keystone_test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6}}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" (and .Values.manifests.certificates .Values.secrets.tls.identity.api.internal) }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: RALLY_ENV_NAME\n          value: {{.deployment_name}}\n      command:\n        - /tmp/rally-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: keystone-etc\n          mountPath: /etc/rally/rally_tests.yaml\n          subPath: rally_tests.yaml\n          readOnly: true\n        - name: keystone-bin\n          mountPath: /tmp/rally-test.sh\n          subPath: rally-test.sh\n          readOnly: true\n        - name: rally-db\n          mountPath: /var/lib/rally\n        - name: rally-work\n          mountPath: /home/rally/.rally\n{{- if and  .Values.manifests.certificates .Values.secrets.tls.identity.api.internal }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.identity.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 8 }}\n{{- end }}\n{{ if $mounts_tests.volumeMounts }}{{ toYaml $mounts_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: keystone-etc\n      secret:\n        
secretName: keystone-etc\n        defaultMode: 0444\n    - name: keystone-bin\n      configMap:\n        name: keystone-bin\n        defaultMode: 0555\n    - name: rally-db\n      emptyDir: {}\n    - name: rally-work\n      emptyDir: {}\n{{- if and .Values.manifests.certificates .Values.secrets.tls.identity.api.internal }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.identity.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{- end }}\n{{ if $mounts_tests.volumes }}{{ toYaml $mounts_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/secret-credential-keys.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_credential_keys }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: keystone-credential-keys\n  annotations:\n    \"helm.sh/hook\": pre-install\n    \"helm.sh/resource-policy\": keep\ntype: Opaque\ndata:\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"keystone\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/secret-fernet-keys.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_fernet_keys }}\n{{- $envAll := . }}\n{{- if eq .Values.conf.keystone.token.provider \"fernet\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: keystone-fernet-keys\n  annotations:\n    \"helm.sh/hook\": pre-install\n    \"helm.sh/resource-policy\": keep\ntype: Opaque\ndata:\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"identity\" ) }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"test\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/secret-ldap-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.endpoints.ldap.auth.client.tls.ca }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ .Values.secrets.ldap.tls }}\n  annotations:\n{{ tuple \"ldap\" \"tls\" . | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  tls.ca: {{ .Values.endpoints.ldap.auth.client.tls.ca | default \"\" | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- $rabbitmqProtocol := \"http\" }}\n{{- if and $envAll.Values.manifests.certificates $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal }}\n{{- $rabbitmqProtocol = \"https\" }}\n{{- end }}\n{{- range $key1, $userClass := tuple \"admin\" \"keystone\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass $rabbitmqProtocol $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"identity\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: ks-pub\n      port: {{ tuple \"identity\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n      {{ end }}\n  selector:\n{{ tuple $envAll \"keystone\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if or (.Values.network.api.node_port.enabled) (.Values.network.admin.node_port.enabled) }}\n  type: NodePort\n  {{ if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "keystone/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"identity\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "keystone/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for keystone.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nrelease_group: null\n\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    keystone_db_sync: quay.io/airshipit/keystone:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    keystone_fernet_setup: quay.io/airshipit/keystone:2025.1-ubuntu_noble\n    keystone_fernet_rotate: quay.io/airshipit/keystone:2025.1-ubuntu_noble\n    keystone_credential_setup: quay.io/airshipit/keystone:2025.1-ubuntu_noble\n    keystone_credential_rotate: quay.io/airshipit/keystone:2025.1-ubuntu_noble\n    keystone_credential_cleanup: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    keystone_api: quay.io/airshipit/keystone:2025.1-ubuntu_noble\n    keystone_domain_manage: 
quay.io/airshipit/keystone:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nbootstrap:\n  enabled: true\n  ks_user: admin\n  script: |\n    # admin needs the admin role for the default domain\n    openstack role add \\\n          --user=\"${OS_USERNAME}\" \\\n          --domain=\"${OS_DEFAULT_DOMAIN}\" \\\n          \"admin\"\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30500\n  admin:\n    node_port:\n      enabled: false\n      port: 30357\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - keystone-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n    rabbit_init:\n      services:\n        - service: oslo_messaging\n          endpoint: internal\n  static:\n    api:\n      jobs:\n        - keystone-db-sync\n        - keystone-credential-setup\n        - keystone-fernet-setup\n      services:\n        - endpoint: internal\n          service: oslo_cache\n        - endpoint: internal\n          service: oslo_db\n    bootstrap:\n      jobs:\n        - keystone-domain-manage\n      services:\n        - endpoint: internal\n          service: identity\n    credential_rotate:\n      jobs:\n        - keystone-credential-setup\n    credential_setup: null\n    credential_cleanup:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    
db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - keystone-db-init\n        - keystone-credential-setup\n        - keystone-fernet-setup\n      services:\n        - endpoint: internal\n          service: oslo_db\n    domain_manage:\n      services:\n        - endpoint: internal\n          service: identity\n    fernet_rotate:\n      jobs:\n        - keystone-fernet-setup\n    fernet_setup: null\n    tests:\n      services:\n        - endpoint: internal\n          service: identity\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\npod:\n  security_context:\n    keystone:\n      pod:\n        runAsUser: 42424\n      container:\n        keystone_api:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    credential_setup:\n      pod:\n        runAsUser: 42424\n      container:\n        keystone_credential_setup:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    fernet_setup:\n      pod:\n        runAsUser: 42424\n      container:\n        keystone_fernet_setup:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    fernet_rotate:\n      pod:\n        runAsUser: 42424\n      container:\n        keystone_fernet_rotate:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    domain_manage:\n      pod:\n        runAsUser: 42424\n      container:\n        keystone_domain_manage_init:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        keystone_domain_manage:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    test:\n      pod:\n        runAsUser: 42424\n      container:\n        keystone_test_ks_user:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        keystone_test:\n          runAsUser: 
65500\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    keystone:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  mounts:\n    keystone_db_init:\n      init_container: null\n      keystone_db_init:\n        volumeMounts:\n        volumes:\n    keystone_db_sync:\n      init_container: null\n      keystone_db_sync:\n        volumeMounts:\n        volumes:\n    keystone_api:\n      init_container: null\n      keystone_api:\n        volumeMounts:\n        volumes:\n    keystone_tests:\n      init_container: null\n      keystone_tests:\n        volumeMounts:\n        volumes:\n    keystone_bootstrap:\n      init_container: null\n      keystone_bootstrap:\n        volumeMounts:\n        volumes:\n    keystone_fernet_setup:\n      init_container: null\n      keystone_fernet_setup:\n        volumeMounts:\n        volumes:\n    keystone_fernet_rotate:\n      init_container: null\n      keystone_fernet_rotate:\n        volumeMounts:\n        volumes:\n    keystone_credential_setup:\n      init_container: null\n      keystone_credential_setup:\n        volumeMounts:\n        volumes:\n    keystone_credential_rotate:\n      init_container: null\n      keystone_credential_rotate:\n        volumeMounts:\n        volumes:\n    keystone_credential_cleanup:\n      init_container: null\n      keystone_credential_cleanup:\n        volumeMounts:\n        volumes:\n    keystone_domain_manage:\n      init_container: null\n      keystone_domain_manage:\n        volumeMounts:\n        volumes:\n  # -- This allows users to add Kubernetes 
Projected Volumes to be mounted at /etc/keystone/keystone.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/daemonset/cronjob\n  ## https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    keystone_db_sync: []\n    keystone_api: []\n    keystone_fernet_setup: []\n    keystone_fernet_rotate: []\n    keystone_credential_setup: []\n    keystone_credential_rotate: []\n    keystone_domain_manage: []\n  replicas:\n    api: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      domain_manage:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: 
\"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      fernet_setup:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      fernet_rotate:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      credential_setup:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      credential_rotate:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      credential_cleanup:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  probes:\n    api:\n      api:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 15\n            periodSeconds: 60\n            timeoutSeconds: 15\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 50\n            periodSeconds: 60\n            timeoutSeconds: 15\njobs:\n  fernet_setup:\n    user: keystone\n    group: keystone\n  fernet_rotate:\n    # NOTE(rk760n): key rotation frequency, token expiration, active keys, and allow_expired_window should statisfy the formula\n    # max_active_keys = ((token_expiration + allow_expired_window) / rotation_frequency) + 2\n    # As expiration is 12h, max_active_keys is 7 and allow_expired_window is 48h by default,\n    # rotation_frequency need to be adjusted\n  
  # 12 hours\n    cron: \"0 */12 * * *\"\n    user: keystone\n    group: keystone\n    history:\n      success: 3\n      failed: 1\n  credential_setup:\n    user: keystone\n    group: keystone\n  credential_rotate:\n    # monthly\n    cron: \"0 0 1 * *\"\n    migrate_wait: 120\n    user: keystone\n    group: keystone\n    history:\n      success: 3\n      failed: 1\n\nnetwork_policy:\n  keystone:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nconf:\n  security: |\n    #\n    # Disable access to the entire file system except for the directories that\n    # are explicitly allowed later.\n    #\n    # This currently breaks the configurations that come with some web application\n    # Debian packages.\n    #\n    #<Directory />\n    #   AllowOverride None\n    #   Require all denied\n    #</Directory>\n\n    # Changing the following options will not really affect the security of the\n    # server, but might make attacks slightly more difficult in some cases.\n\n    #\n    # ServerTokens\n    # This directive configures what you return as the Server HTTP response\n    # Header. 
The default is 'Full' which sends information about the OS-Type\n    # and compiled in modules.\n    # Set to one of:  Full | OS | Minimal | Minor | Major | Prod\n    # where Full conveys the most information, and Prod the least.\n    ServerTokens Prod\n\n    #\n    # Optionally add a line containing the server version and virtual host\n    # name to server-generated pages (internal error documents, FTP directory\n    # listings, mod_status and mod_info output etc., but not CGI generated\n    # documents or custom error documents).\n    # Set to \"EMail\" to also include a mailto: link to the ServerAdmin.\n    # Set to one of:  On | Off | EMail\n    ServerSignature Off\n\n    #\n    # Allow TRACE method\n    #\n    # Set to \"extended\" to also reflect the request body (only for testing and\n    # diagnostic purposes).\n    #\n    # Set to one of:  On | Off | extended\n    TraceEnable Off\n\n    #\n    # Forbid access to version control directories\n    #\n    # If you use version control systems in your document root, you should\n    # probably deny access to their directories. For example, for subversion:\n    #\n    #<DirectoryMatch \"/\\.svn\">\n    #   Require all denied\n    #</DirectoryMatch>\n\n    #\n    # Setting this header will prevent MSIE from interpreting files as something\n    # else than declared by the content type in the HTTP headers.\n    # Requires mod_headers to be enabled.\n    #\n    #Header set X-Content-Type-Options: \"nosniff\"\n\n    #\n    # Setting this header will prevent other sites from embedding pages from this\n    # site as frames. 
This defends against clickjacking attacks.\n    # Requires mod_headers to be enabled.\n    #\n    #Header set X-Frame-Options: \"sameorigin\"\n  software:\n    apache2:\n      binary: apache2\n      start_parameters: -DFOREGROUND\n      site_dir: /etc/apache2/sites-enable\n      conf_dir: /etc/apache2/conf-enabled\n      mods_dir: /etc/apache2/mods-available\n      a2enmod: null\n      a2dismod: null\n  keystone:\n    DEFAULT:\n      log_config_append: /etc/keystone/logging.conf\n      max_token_size: 255\n      # NOTE(rk760n): if you need auth notifications to be sent, uncomment it\n      # notification_opt_out: \"\"\n    token:\n      provider: fernet\n      # 12 hours\n      expiration: 43200\n    identity:\n      domain_specific_drivers_enabled: True\n      domain_config_dir: /etc/keystone/domains\n    fernet_tokens:\n      key_repository: /etc/keystone/fernet-keys/\n      max_active_keys: 7\n    credential:\n      key_repository: /etc/keystone/credential-keys/\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. 
when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    cache:\n      enabled: true\n      backend: dogpile.cache.memcached\n    oslo_messaging_notifications:\n      driver: messagingv2\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: true\n    oslo_middleware:\n      enable_proxy_headers_parsing: true\n    oslo_policy:\n      policy_file: /etc/keystone/policy.yaml\n    oslo_concurrency:\n      lock_path: /var/lock\n    security_compliance:\n      # NOTE(vdrok): The following two options have effect only for SQL backend\n      lockout_failure_attempts: 5\n      lockout_duration: 1800\n  # NOTE(lamt) We can leverage multiple domains with different\n  # configurations as outlined in\n  # https://docs.openstack.org/keystone/pike/admin/identity-domain-specific-config.html.\n  # A sample of the value override can be found in sample file:\n  # tools/overrides/example/keystone_domain_config.yaml\n  # ks_domains:\n  policy: {}\n  access_rules: {}\n  rabbitmq:\n    # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones\n    policies:\n      - vhost: \"keystone\"\n        name: \"ha_ttl_keystone\"\n        definition:\n          # mirror messges to other nodes in rmq cluster\n          ha-mode: \"all\"\n          ha-sync-mode: \"automatic\"\n          # 70s\n          message-ttl: 70000\n        priority: 0\n        apply-to: all\n        pattern: '^(?!(amq\\.|reply_)).*'\n  rally_tests:\n    run_tempest: false\n    tests:\n      KeystoneBasic.add_and_remove_user_role:\n        - runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.authenticate_user_and_validate_token:\n        - args: {}\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n         
 sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_add_and_list_user_roles:\n        - runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_and_delete_ec2credential:\n        - runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_and_list_ec2credentials:\n        - runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_and_delete_role:\n        - runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_and_delete_service:\n        - args:\n            description: test_description\n            service_type: Rally_test_type\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_and_get_role:\n        - args: {}\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_and_list_services:\n        - args:\n            description: test_description\n            service_type: Rally_test_type\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_and_list_tenants:\n        - args: {}\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      
KeystoneBasic.create_and_list_users:\n        - args: {}\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_delete_user:\n        - args: {}\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_tenant:\n        - args: {}\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_tenant_with_users:\n        - args:\n            users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_update_and_delete_tenant:\n        - args: {}\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_user:\n        - args: {}\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_user_set_enabled_and_delete:\n        - args:\n            enabled: true\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n        - args:\n            enabled: false\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_user_update_password:\n        - args: {}\n          runner:\n            concurrency: 1\n            times: 1\n            type: 
constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.get_entities:\n        - runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n  mpm_event: |\n    <IfModule mpm_event_module>\n      ServerLimit         1024\n      StartServers        32\n      MinSpareThreads     32\n      MaxSpareThreads     256\n      ThreadsPerChild     25\n      MaxRequestsPerChild 128\n      ThreadLimit         720\n    </IfModule>\n  # -- WSGIScriptAlias for apache2. Copied from keystone/wsgi/api.py\n  ## apache cannot load a module and the path can change depending on python version\n  wsgi_script: |\n    import threading\n\n    from keystone.server import wsgi\n\n    application = None\n    lock = threading.Lock()\n    with lock:\n        if application is None:\n            application = wsgi.initialize_public_application()\n  wsgi_keystone: |\n    {{- $portInt := tuple \"identity\" \"service\" \"api\" $ | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    Listen 0.0.0.0:{{ $portInt }}\n\n    LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" combined\n    LogFormat \"%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" proxy\n\n    SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n    CustomLog /dev/stdout combined env=!forwarded\n    CustomLog /dev/stdout proxy env=forwarded\n    ErrorLogFormat \"%{cu}t %M\"\n    ErrorLog /dev/stdout\n\n    <VirtualHost *:{{ $portInt }}>\n        WSGIDaemonProcess keystone-public processes=1 threads=1 user=keystone group=keystone display-name=%{GROUP}\n        WSGIProcessGroup keystone-public\n        WSGIScriptAlias / /var/www/cgi-bin/keystone/wsgi.py\n        WSGIApplicationGroup %{GLOBAL}\n        WSGIPassAuthorization On\n        ErrorLogFormat \"%{cu}t %M\"\n        ErrorLog /dev/stdout\n\n        SetEnvIf 
X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n        CustomLog /dev/stdout combined env=!forwarded\n        CustomLog /dev/stdout proxy env=forwarded\n    </VirtualHost>\n  sso_callback_template: |\n    <!DOCTYPE html>\n    <html xmlns=\"http://www.w3.org/1999/xhtml\">\n      <head>\n        <title>Keystone WebSSO redirect</title>\n      </head>\n      <body>\n         <form id=\"sso\" name=\"sso\" action=\"$host\" method=\"post\">\n           Please wait...\n           <br/>\n           <input type=\"hidden\" name=\"token\" id=\"token\" value=\"$token\"/>\n           <noscript>\n             <input type=\"submit\" name=\"submit_no_javascript\" id=\"submit_no_javascript\"\n                value=\"If your JavaScript is disabled, please click to continue\"/>\n           </noscript>\n         </form>\n         <script type=\"text/javascript\">\n           window.onload = function() {\n             document.forms['sso'].submit();\n           }\n         </script>\n      </body>\n    </html>\n  logging:\n    loggers:\n      keys:\n        - root\n        - keystone\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_keystone:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: keystone\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      
class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: keystone-keystone-admin\n    test: keystone-keystone-test\n  oslo_db:\n    admin: keystone-db-admin\n    keystone: keystone-db-user\n  oslo_messaging:\n    admin: keystone-rabbitmq-admin\n    keystone: keystone-rabbitmq-user\n  ldap:\n    tls: keystone-ldap-tls\n  tls:\n    identity:\n      api:\n        public: keystone-tls-public\n        internal: keystone-tls-api\n  oci_image_registry:\n    keystone: keystone-oci-image-registry\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      keystone:\n        username: keystone\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    namespace: null\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n        default_domain_id: default\n      
test:\n        role: admin\n        region_name: RegionOne\n        username: keystone-test\n        password: password\n        project_name: test\n        user_domain_name: default\n        project_domain_name: default\n        default_domain_id: default\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: /v3\n      healthcheck: /healthcheck\n    scheme:\n      default: http\n      service: http\n    port:\n      api:\n        default: 80\n        # NOTE(portdirect): to retain portability across images, and allow\n        # running under a unprivileged user simply, we default to a port > 1000.\n        internal: 5000\n        service: 5000\n  oslo_db:\n    namespace: null\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      keystone:\n        username: keystone\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /keystone\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_messaging:\n    namespace: null\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n        secret:\n          tls:\n            internal: rabbitmq-tls-direct\n      keystone:\n        username: keystone\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /keystone\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  oslo_cache:\n    namespace: null\n    hosts:\n    
  default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  ldap:\n    auth:\n      client:\n        tls:\n          # NOTE(lamt): Specify a CA value here will place a LDAPS certificate at\n          # /etc/certs/tls.ca.  To ensure keystone uses LDAPS, the\n          # following key will need to be overrided under section [ldap] or the\n          # correct domain-specific setting, else it will not be enabled:\n          #\n          #   use_tls: true\n          #   tls_req_cert: allow # Valid values: demand, never, allow\n          #   tls_cacertfile: /etc/certs/tls.ca # abs path to the CA cert\n          ca: null\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress\n  # They are using to enable the Egress K8s network policy.\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns:\n        default: 53\n        protocol: UDP\n  ingress:\n    namespace: null\n    name: ingress\n    hosts:\n      default: ingress\n    port:\n      ingress:\n        default: 80\n\ntls:\n  identity: false\n  oslo_messaging: false\n  oslo_db: false\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  cron_credential_rotate: true\n  cron_fernet_rotate: true\n  deployment_api: true\n  ingress_api: true\n  job_bootstrap: true\n  job_credential_cleanup: true\n  job_credential_setup: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_domain_manage: true\n  job_fernet_setup: true\n  job_image_repo_sync: 
true\n  job_rabbit_init: true\n  pdb_api: true\n  pod_rally_test: true\n  network_policy: false\n  secret_credential_keys: true\n  secret_db: true\n  secret_fernet_keys: true\n  secret_ingress_tls: true\n  secret_keystone: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_ingress_api: true\n  service_api: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "kibana/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v8.19.9\ndescription: OpenStack-Helm Kibana\nname: kibana\nversion: 2025.2.0\nhome: https://www.elastic.co/products/kibana\nsources:\n  - https://github.com/elastic/kibana\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "kibana/templates/bin/_apache.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ev\n\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n\n  if [ -f /etc/apache2/envvars ]; then\n     # Loading Apache2 ENV variables\n     source /etc/httpd/apache2/envvars\n  fi\n  # Apache gets grumpy about PID files pre-existing\n  rm -f /etc/httpd/logs/httpd.pid\n\n  if [ -f /usr/local/apache2/conf/.htpasswd ]; then\n    htpasswd -b /usr/local/apache2/conf/.htpasswd \"$ELASTICSEARCH_USERNAME\" \"$ELASTICSEARCH_PASSWORD\"\n  else\n    htpasswd -cb /usr/local/apache2/conf/.htpasswd \"$ELASTICSEARCH_USERNAME\" \"$ELASTICSEARCH_PASSWORD\"\n  fi\n\n  #Launch Apache on Foreground\n  exec httpd -DFOREGROUND\n}\n\nfunction stop () {\n  apachectl -k graceful-stop\n}\n\n$COMMAND\n"
  },
  {
    "path": "kibana/templates/bin/_create_kibana_index_patterns.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\nset -ex\nset -o noglob\n\ncreate_data_view() {\n  local index_name=$1\n  curl -u \"${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" \\\n    --max-time 30 \\\n    -X POST \"${KIBANA_ENDPOINT}/api/data_views/data_view\" \\\n    -H \"kbn-xsrf: true\" \\\n    -H \"Content-Type: application/json\" \\\n    -d \"{\n      \\\"data_view\\\": {\n        \\\"title\\\": \\\"${index_name}-*\\\",\n        \\\"timeFieldName\\\": \\\"@timestamp\\\"\n      }\n    }\"\n}\n\ndata_view_exists() {\n  local index_name=$1\n  local response=$(curl -s -u \"${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" \\\n    --max-time 30 \\\n    -X GET \"${KIBANA_ENDPOINT}/api/data_views\" \\\n    -H \"kbn-xsrf: true\" \\\n    -H \"Content-Type: application/json\")\n\n  if echo \"$response\" | grep -Fq \"\\\"title\\\":\\\"${index_name}-*\\\"\"; then\n    return 0\n  fi\n  return 1\n}\n\nset_default_data_view() {\n  local view_id=$1\n  curl -u \"${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" \\\n    --max-time 30 \\\n    -X POST \"${KIBANA_ENDPOINT}/api/data_views/default\" \\\n    -H \"kbn-xsrf: true\" \\\n    -H \"Content-Type: application/json\" \\\n    -d \"{\n      \\\"data_view_id\\\": \\\"${view_id}\\\",\n      \\\"force\\\": true\n    }\"\n}\n\nfind_and_set_python() {\n  pythons=\"python3 python python2\"\n  for p in ${pythons[@]}; do\n    python=$(which ${p})\n    if [[ $? 
-eq 0 ]]; then\n      echo found python: ${python}\n      break\n    fi\n  done\n}\n\nget_view_id() {\n  local index_name=$1\n  local response=$(curl -s -u \"${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" \\\n    --max-time 30 \\\n    -X GET \"${KIBANA_ENDPOINT}/api/data_views\" \\\n    -H \"kbn-xsrf: true\" \\\n    -H \"Content-Type: application/json\" |\n    $python -c \"import sys,json; j=json.load(sys.stdin); t=[x['id'] for x in j['data_view'] if x['title'] == '${index_name}-*']; print(t[0] if len(t) else '')\"\n    )\n  echo $response\n}\n\n# Create data views\n{{- range $objectType, $indices := .Values.conf.create_kibana_indexes.indexes }}\n{{- range $indices }}\nif ! data_view_exists \"{{ . }}\"; then\n  create_data_view \"{{ . }}\"\n  for t in 30 60 120 180; do\n    if data_view_exists \"{{ . }}\"; then\n      echo \"Data view '{{ . }}-*' exists\"\n      break\n    fi\n    sleep $t\n    echo \"Retrying creation of data view '{{ . }}-*' ...\"\n    create_data_view \"{{ . }}\"\n  done\n  if ! data_view_exists \"{{ . }}\"; then\n    echo \"Giving up\"\n    return 1\n  fi\nelse\n  echo \"Data view '{{ . }}-*' exists\"\nfi\n\n{{- end }}\n{{- end }}\n\n# Lookup default view id.  The new Kibana view API requires the id\n# instead of simply the name like the previous index API did.\nfind_and_set_python\n\ndefault_index=\"{{ .Values.conf.create_kibana_indexes.default_index }}\"\ndefault_index_id=$(get_view_id $default_index)\n\nset_default_data_view \"$default_index_id\"\necho \"Default data view set to '${default_index}'.\"\n"
  },
  {
    "path": "kibana/templates/bin/_flush_kibana_metadata.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\nset -ex\necho \"Deleting index created for metadata\"\n\ncurl ${CACERT_OPTION} -K- <<< \"--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}\" \\\n  -XDELETE \"${ELASTICSEARCH_ENDPOINT}/.kibana*\"\n"
  },
  {
    "path": "kibana/templates/bin/_kibana.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -e\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec /usr/share/kibana/bin/kibana \\\n    --elasticsearch.hosts=\"${ELASTICSEARCH_HOSTS}\" \\\n    --elasticsearch.username=\"${ELASTICSEARCH_USERNAME}\" \\\n    --elasticsearch.password=\"${ELASTICSEARCH_PASSWORD}\"\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "kibana/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: kibana-bin\ndata:\n  apache.sh: |\n{{ tuple \"bin/_apache.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  kibana.sh: |\n{{ tuple \"bin/_kibana.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  create_kibana_index_patterns.sh: |\n{{ tuple \"bin/_create_kibana_index_patterns.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  flush_kibana_metadata.sh: |\n{{ tuple \"bin/_flush_kibana_metadata.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "kibana/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: kibana-etc\ntype: Opaque\ndata:\n  kibana.yml: {{ toYaml .Values.conf.kibana | b64enc }}\n  # NOTE(portdirect): this must be last, to work round helm ~2.7 bug.\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.httpd \"key\" \"httpd.conf\" \"format\" \"Secret\") | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "kibana/templates/deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"kibanaProbeTemplate\" }}\n{{- $kibanaPort := tuple \"kibana\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $esUser := .Values.endpoints.elasticsearch.auth.admin.username }}\n{{- $esPass := .Values.endpoints.elasticsearch.auth.admin.password }}\n{{- $authHeader := printf \"%s:%s\" $esUser $esPass | b64enc }}\nhttpGet:\n  path: /status\n  port: {{ $kibanaPort }}\n  httpHeaders:\n    - name: Authorization\n      value: Basic {{ $authHeader }}\n{{- end }}\n\n{{- if .Values.manifests.deployment }}\n{{- $envAll := . }}\n\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n{{- $esUser := .Values.endpoints.elasticsearch.auth.admin.username }}\n{{- $esPass := .Values.endpoints.elasticsearch.auth.admin.password }}\n{{- $authHeader := printf \"%s:%s\" $esUser $esPass | b64enc }}\n\n{{- $esScheme := tuple \"elasticsearch\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\n{{- $esSvc := tuple \"elasticsearch\" \"default\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n{{- $esHosts := printf \"%s://%s\" $esScheme $esSvc }}\n\n{{- $serviceAccountName := \"kibana\" }}\n{{ tuple $envAll \"kibana\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $kibanaPort := tuple \"kibana\" \"internal\" \"http\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: kibana\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"kibana\" \"dashboard\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.kibana }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"kibana\" \"dashboard\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"kibana\" \"dashboard\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"kibana\" \"containerNames\" (list \"apache-proxy\" \"kibana\" \"init\")  | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"dashboard\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"kibana\" \"dashboard\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.kibana.node_selector_key }}: {{ .Values.labels.kibana.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"kibana\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: apache-proxy\n{{ tuple $envAll \"apache_proxy\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.apache_proxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"dashboard\" \"container\" \"apache_proxy\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/apache.sh\n            - start\n          ports:\n            - name: http\n              containerPort: {{ $kibanaPort }}\n          readinessProbe:\n            tcpSocket:\n              port: {{ $kibanaPort }}\n            initialDelaySeconds: 20\n            periodSeconds: 30\n          env:\n            - name: ELASTICSEARCH_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_USERNAME\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret 
}}\n                  key: ELASTICSEARCH_PASSWORD\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: kibana-bin\n              mountPath: /tmp/apache.sh\n              subPath: apache.sh\n              readOnly: true\n            - name: kibana-etc\n              mountPath: /usr/local/apache2/conf/httpd.conf\n              subPath: httpd.conf\n              readOnly: true\n        - name: kibana\n{{ tuple $envAll \"kibana\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.kibana | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"dashboard\" \"container\" \"kibana\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/kibana.sh\n            - start\n          ports:\n            - name: kibana\n              containerPort: {{ tuple \"kibana\" \"internal\" \"kibana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" . \"component\" \"kibana\" \"container\" \"kibana\" \"type\" \"liveness\" \"probeTemplate\" (include \"kibanaProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"kibana\" \"container\" \"kibana\" \"type\" \"readiness\" \"probeTemplate\" (include \"kibanaProbeTemplate\" . 
| fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          env:\n            - name: ELASTICSEARCH_HOSTS\n              value: {{ $esHosts }}\n            - name: ELASTICSEARCH_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_USERNAME\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_PASSWORD\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: kibana-bin\n              mountPath: /tmp/kibana.sh\n              subPath: kibana.sh\n              readOnly: true\n            - name: pod-etc-kibana\n              mountPath: /usr/share/kibana/config\n            - name: pod-optimize-kibana\n              mountPath: /usr/share/kibana/optimize\n            - name: kibana-etc\n              mountPath: /usr/share/kibana/config/kibana.yml\n              subPath: kibana.yml\n              readOnly: true\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.elasticsearch.auth.admin.secret.tls.internal \"path\" \"/etc/elasticsearch/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-run\n          emptyDir:\n            medium: \"Memory\"\n        - name: pod-etc-kibana\n          emptyDir: {}\n        - name: pod-optimize-kibana\n          emptyDir: {}\n        - name: kibana-bin\n          configMap:\n            name: kibana-bin\n            defaultMode: 0555\n        - name: kibana-etc\n          secret:\n            secretName: kibana-etc\n            defaultMode: 0444\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" 
$envAll.Values.endpoints.elasticsearch.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "kibana/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "kibana/templates/ingress-kibana.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n\n{{- if and .Values.manifests.ingress .Values.network.kibana.ingress.public }}\n{{- $envAll := . -}}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"kibana\" \"backendServiceType\" \"kibana\" \"backendPort\" \"http\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.kibana.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "kibana/templates/job-flush-kibana-metadata.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nThis hook is enabled for post-delete and pre-upgrade triggers.\nThe indices deleted by this hook are Kibana's meta indices\n  - .kibana\n  - .kibana_1\n  - .kibana_2\n  etc\n\nThis is done to get around https://github.com/elastic/kibana/issues/58388\nwhich sometimes prevents Kibana deployments from upgrading successfully.\n*/}}\n\n{{- if .Values.manifests.job_flush_kibana_metadata }}\n{{- $envAll := . }}\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n{{- $serviceAccountName := \"flush-kibana-metadata\" }}\n{{ tuple $envAll \"flush_kibana_metadata\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: flush-kibana-metadata\n  labels:\n{{ tuple $envAll \"kibana\" \"flush_kibana_metadata\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  backoffLimit: {{ .Values.jobs.flush_kibana_metadata.backoffLimit }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"kibana\" \"flush_kibana_metadata\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        \"helm.sh/hook\": pre-install, post-delete, pre-upgrade\n        \"helm.sh/hook-delete-policy\": hook-succeeded\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"flush-kibana-metadata\" \"containerNames\" (list \"flush-kibana-metadata\" \"init\")  | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"flush_kibana_metadata\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      activeDeadlineSeconds: {{ .Values.jobs.flush_kibana_metadata.activeDeadlineSeconds }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"flush_kibana_metadata\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: flush-kibana-metadata\n{{ tuple $envAll \"flush_kibana_metadata\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.flush_kibana_metadata | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"flush_kibana_metadata\" \"container\" \"flush_kibana_metadata\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: ELASTICSEARCH_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_USERNAME\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_PASSWORD\n            - name: KIBANA_ENDPOINT\n              value: {{ tuple \"kibana\" \"internal\" \"http\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n            - name: ELASTICSEARCH_ENDPOINT\n              value: {{ printf \"%s://%s\" (tuple \"elasticsearch\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\") (tuple \"elasticsearch\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\") }}\n{{- if .Values.manifests.certificates }}\n            - name: CACERT_OPTION\n              value: \"--cacert /etc/elasticsearch/certs/ca.crt\"\n{{- end }}\n          command:\n            - /tmp/flush_kibana_metadata.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: kibana-bin\n              mountPath: /tmp/flush_kibana_metadata.sh\n              subPath: flush_kibana_metadata.sh\n              readOnly: false\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.elasticsearch.auth.admin.secret.tls.internal \"path\" \"/etc/elasticsearch/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-run\n          emptyDir:\n            medium: \"Memory\"\n        - name: kibana-bin\n          configMap:\n            name: kibana-bin\n            defaultMode: 0755\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.elasticsearch.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "kibana/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"kibana\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "kibana/templates/job-register-kibana-indexes.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_register_kibana_indexes }}\n{{- $envAll := . }}\n{{- $esUserSecret := .Values.secrets.elasticsearch.user }}\n{{- $serviceAccountName := \"register-kibana-indexes\" }}\n{{ tuple $envAll \"register_kibana_indexes\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: register-kibana-indexes\n  labels:\n{{ tuple $envAll \"kibana\" \"register_kibana_indexes\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"kibana\" \"register_kibana_indexes\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"register-kibana-indexes\" \"containerNames\" (list \"register-kibana-indexes\" \"init\")  | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"register_kibana_indexes\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"register_kibana_indexes\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: register-kibana-indexes\n{{ tuple $envAll \"register_kibana_indexes\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.register_kibana_indexes | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"register_kibana_indexes\" \"container\" \"register_kibana_indexes\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: ELASTICSEARCH_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_USERNAME\n            - name: ELASTICSEARCH_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $esUserSecret }}\n                  key: ELASTICSEARCH_PASSWORD\n            - name: KIBANA_ENDPOINT\n              value: {{ tuple \"kibana\" \"internal\" \"http\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n            - name: ELASTICSEARCH_ENDPOINT\n              value: {{ tuple \"elasticsearch\" \"internal\" \"client\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n          command:\n            - /tmp/create_kibana_index_patterns.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-run\n              mountPath: /run\n            - name: kibana-bin\n              mountPath: /tmp/create_kibana_index_patterns.sh\n              subPath: create_kibana_index_patterns.sh\n              readOnly: false\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-run\n          emptyDir:\n            medium: \"Memory\"\n        - name: kibana-bin\n          configMap:\n            name: kibana-bin\n            defaultMode: 0755\n{{- end }}\n"
  },
  {
    "path": "kibana/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"kibana\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "kibana/templates/secret-elasticsearch-creds.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_elasticsearch }}\n{{- $envAll := . }}\n{{- $secretName := index $envAll.Values.secrets.elasticsearch.user }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  ELASTICSEARCH_USERNAME: {{ .Values.endpoints.elasticsearch.auth.admin.username | b64enc }}\n  ELASTICSEARCH_PASSWORD: {{ .Values.endpoints.elasticsearch.auth.admin.password | b64enc }}\n  BIND_DN: {{ .Values.endpoints.ldap.auth.admin.bind | b64enc }}\n  BIND_PASSWORD: {{ .Values.endpoints.ldap.auth.admin.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "kibana/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"kibana\" \"backendService\" \"kibana\" ) }}\n{{- end }}\n"
  },
  {
    "path": "kibana/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "kibana/templates/service-ingress-kibana.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress .Values.network.kibana.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"kibana\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "kibana/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"kibana\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: http\n    port: {{ tuple \"kibana\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.kibana.node_port.enabled }}\n    nodePort: {{ .Values.network.kibana.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"kibana\" \"dashboard\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.kibana.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n"
  },
  {
    "path": "kibana/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nlabels:\n  kibana:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    apache_proxy: docker.io/library/httpd:2.4\n    kibana: docker.elastic.co/kibana/kibana:8.19.9\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n    register_kibana_indexes: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    flush_kibana_metadata: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\npod:\n  security_context:\n    dashboard:\n      pod:\n        runAsUser: 1000\n      container:\n        apache_proxy:\n          runAsUser: 0\n          readOnlyRootFilesystem: false\n        kibana:\n          runAsNonRoot: true\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: false\n    register_kibana_indexes:\n      pod:\n        runAsUser: 1000\n      container:\n        register_kibana_indexes:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    flush_kibana_metadata:\n      pod:\n        runAsUser: 1000\n      container:\n        flush_kibana_metadata:\n          allowPrivilegeEscalation: false\n     
     readOnlyRootFilesystem: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  lifecycle:\n    upgrades:\n      deployments:\n        pod_replacement_strategy: RollingUpdate\n        revision_history: 3\n        rolling_update:\n          max_surge: 3\n          max_unavailable: 1\n  replicas:\n    kibana: 1\n  resources:\n    enabled: false\n    apache_proxy:\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n    kibana:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      register_kibana_indexes:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      flush_kibana_metadata:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  probes:\n    kibana:\n      kibana:\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 180\n            periodSeconds: 60\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 20\n            periodSeconds: 30\nnetwork_policy:\n  kibana:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nsecrets:\n  elasticsearch:\n    user: kibana-elasticsearch-user\n  oci_image_registry:\n    kibana: kibana-oci-image-registry-key\n  tls:\n    kibana:\n      kibana:\n        public: kibana-tls-public\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n     
   jobs:\n          - kibana-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    kibana:\n      jobs:\n        - flush-kibana-metadata\n      services:\n        - endpoint: internal\n          service: elasticsearch\n    register_kibana_indexes:\n      jobs:\n        - flush-kibana-metadata\n      services:\n        - endpoint: internal\n          service: kibana\n    flush_kibana_metadata:\n      services:\n        - endpoint: internal\n          service: elasticsearch\n\njobs:\n  flush_kibana_metadata:\n    backoffLimit: 6\n    activeDeadlineSeconds: 600\n\nconf:\n  httpd: |\n    ServerRoot \"/usr/local/apache2\"\n\n    Listen 80\n\n    LoadModule mpm_event_module modules/mod_mpm_event.so\n    LoadModule authn_file_module modules/mod_authn_file.so\n    LoadModule authn_core_module modules/mod_authn_core.so\n    LoadModule authz_host_module modules/mod_authz_host.so\n    LoadModule authz_groupfile_module modules/mod_authz_groupfile.so\n    LoadModule authz_user_module modules/mod_authz_user.so\n    LoadModule authz_core_module modules/mod_authz_core.so\n    LoadModule access_compat_module modules/mod_access_compat.so\n    LoadModule auth_basic_module modules/mod_auth_basic.so\n    LoadModule ldap_module modules/mod_ldap.so\n    LoadModule authnz_ldap_module modules/mod_authnz_ldap.so\n    LoadModule reqtimeout_module modules/mod_reqtimeout.so\n    LoadModule filter_module modules/mod_filter.so\n    LoadModule proxy_html_module modules/mod_proxy_html.so\n    LoadModule log_config_module modules/mod_log_config.so\n    LoadModule env_module modules/mod_env.so\n    LoadModule headers_module modules/mod_headers.so\n    LoadModule setenvif_module modules/mod_setenvif.so\n    LoadModule version_module modules/mod_version.so\n    LoadModule proxy_module modules/mod_proxy.so\n    LoadModule 
proxy_connect_module modules/mod_proxy_connect.so\n    LoadModule proxy_http_module modules/mod_proxy_http.so\n    LoadModule proxy_balancer_module modules/mod_proxy_balancer.so\n    LoadModule remoteip_module modules/mod_remoteip.so\n    LoadModule slotmem_shm_module modules/mod_slotmem_shm.so\n    LoadModule slotmem_plain_module modules/mod_slotmem_plain.so\n    LoadModule unixd_module modules/mod_unixd.so\n    LoadModule status_module modules/mod_status.so\n    LoadModule autoindex_module modules/mod_autoindex.so\n\n    <IfModule unixd_module>\n    User daemon\n    Group daemon\n    </IfModule>\n\n    <Directory />\n        AllowOverride none\n        Require all denied\n    </Directory>\n\n    <Files \".ht*\">\n        Require all denied\n    </Files>\n\n    ErrorLog /dev/stderr\n\n    LogLevel warn\n\n    <IfModule log_config_module>\n        LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" combined\n        LogFormat \"%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" proxy\n        LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b\" common\n\n        <IfModule logio_module>\n          LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\" %I %O\" combinedio\n        </IfModule>\n\n        SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n        CustomLog /dev/stdout common\n        CustomLog /dev/stdout combined\n        CustomLog /dev/stdout proxy env=forwarded\n    </IfModule>\n\n    <Directory \"/usr/local/apache2/cgi-bin\">\n        AllowOverride None\n        Options None\n        Require all granted\n    </Directory>\n\n    <IfModule headers_module>\n        RequestHeader unset Proxy early\n    </IfModule>\n\n    <IfModule proxy_html_module>\n    Include conf/extra/proxy-html.conf\n    </IfModule>\n\n    <VirtualHost *:80>\n      RemoteIPHeader X-Original-Forwarded-For\n      <Location />\n          ProxyPass http://localhost:{{ tuple 
\"kibana\" \"internal\" \"kibana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/\n          ProxyPassReverse http://localhost:{{ tuple \"kibana\" \"internal\" \"kibana\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/\n      </Location>\n      <Proxy *>\n          AuthName \"Kibana\"\n          AuthType Basic\n          AuthBasicProvider file ldap\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}\n          AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}\n          AuthLDAPURL {{ tuple \"ldap\" \"default\" \"ldap\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | quote }}\n          Require valid-user\n      </Proxy>\n    </VirtualHost>\n  kibana:\n    elasticsearch:\n      pingTimeout: 1500\n      requestTimeout: 30000\n      shardTimeout: 0\n    ops:\n      interval: 5000\n    server:\n      rewriteBasePath: false\n      host: localhost\n      name: kibana\n      maxPayload: 1048576\n      port: 5601\n      ssl:\n        enabled: false\n  create_kibana_indexes:\n    indexes:\n      base:\n        - logstash\n        - journal\n        - kernel\n      application:\n        - openstack\n    default_index: logstash\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      kibana:\n        username: kibana\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  elasticsearch:\n    name: elasticsearch\n    namespace: 
null\n    auth:\n      admin:\n        username: admin\n        password: changeme\n        secret:\n          tls:\n            internal: elasticsearch-tls-api\n    hosts:\n      default: elasticsearch-logging\n      public: elasticsearch\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      client:\n        default: 80\n  kibana:\n    name: kibana\n    namespace: null\n    hosts:\n      default: kibana-dash\n      public: kibana\n    host_fqdn_override:\n      default: null\n      # NOTE(srwilkers): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      kibana:\n        default: 5601\n      http:\n        default: 80\n  ldap:\n    hosts:\n      default: ldap\n    auth:\n      admin:\n        bind: \"cn=admin,dc=cluster,dc=local\"\n        password: password\n    host_fqdn_override:\n      default: null\n    path:\n      default: \"/ou=People,dc=cluster,dc=local\"\n    scheme:\n      default: ldap\n    port:\n      ldap:\n        default: 389\n\nnetwork:\n  kibana:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        nginx.ingress.kubernetes.io/affinity: cookie\n        nginx.ingress.kubernetes.io/session-cookie-name: kube-ingress-session-kibana\n        nginx.ingress.kubernetes.io/session-cookie-hash: sha1\n        nginx.ingress.kubernetes.io/session-cookie-expires: \"600\"\n        nginx.ingress.kubernetes.io/session-cookie-max-age: \"600\"\n        haproxy.org/path-rewrite: /\n        haproxy.org/cookie-persistence: \"kube-ingress-session-kibana\"\n    node_port:\n      enabled: false\n      port: 
30905\n    port: 5601\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  deployment: true\n  ingress: true\n  job_image_repo_sync: true\n  network_policy: false\n  secret_elasticsearch: true\n  secret_ingress_tls: true\n  secret_registry: true\n  service: true\n  service_ingress: true\n  job_register_kibana_indexes: true\n  job_flush_kibana_metadata: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "kube-dns/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.14.5\ndescription: OpenStack-Helm Kube-DNS\nname: kube-dns\nversion: 2025.2.0\nhome: https://github.com/coreos/flannel\nicon: https://raw.githubusercontent.com/coreos/flannel/master/logos/flannel-horizontal-color.png\nsources:\n  - https://github.com/coreos/flannel\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "kube-dns/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: kube-dns-bin\ndata:\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "kube-dns/templates/configmap-kube-dns.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_kube_dns }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: kube-dns\n  labels:\n    addonmanager.kubernetes.io/mode: EnsureExists\n{{- end }}\n"
  },
  {
    "path": "kube-dns/templates/deployment-kube-dns.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_kube_dns }}\n{{- $envAll := . }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n    k8s-app: kube-dns\n{{ tuple $envAll \"kubernetes\" \"dns\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  name: kube-dns\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      k8s-app: kube-dns\n{{ tuple $envAll \"kubernetes\" \"dns\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  strategy:\n    rollingUpdate:\n      maxSurge: 10%\n      maxUnavailable: 0\n    type: RollingUpdate\n  template:\n    metadata:\n      labels:\n        k8s-app: kube-dns\n{{ tuple $envAll \"kubernetes\" \"dns\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                  - key: beta.kubernetes.io/arch\n                    operator: In\n                    values:\n                      - amd64\n      containers:\n        - name: kubedns\n{{ tuple $envAll \"kube_dns\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          args:\n            - --domain={{ .Values.networking.dnsDomain }}.\n            - --dns-port=10053\n            - --config-dir=/kube-dns-config\n            - --v=2\n          env:\n            - name: PROMETHEUS_PORT\n              value: \"10055\"\n          livenessProbe:\n            failureThreshold: 5\n            httpGet:\n              path: /healthcheck/kubedns\n              port: 10054\n              scheme: HTTP\n            initialDelaySeconds: 60\n            periodSeconds: 10\n            successThreshold: 1\n            timeoutSeconds: 5\n          ports:\n            - containerPort: 10053\n              name: dns-local\n              protocol: UDP\n            - containerPort: 10053\n              name: dns-tcp-local\n              protocol: TCP\n            - containerPort: 10055\n              name: metrics\n              protocol: TCP\n          readinessProbe:\n            failureThreshold: 3\n            httpGet:\n              path: /readiness\n              port: 8081\n              scheme: HTTP\n            initialDelaySeconds: 3\n            periodSeconds: 10\n            successThreshold: 1\n            timeoutSeconds: 5\n          resources:\n            limits:\n              memory: 170Mi\n            requests:\n              cpu: 100m\n              memory: 70Mi\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: 
/tmp\n            - mountPath: /kube-dns-config\n              name: kube-dns-config\n        - name: dnsmasq\n{{ tuple $envAll \"kube_dns_nanny\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          args:\n            - -v=2\n            - -logtostderr\n            - -configDir=/etc/k8s/dns/dnsmasq-nanny\n            - -restartDnsmasq=true\n            - --\n            - -k\n            - --cache-size=1000\n            - --log-facility=-\n            - --server=/{{ .Values.networking.dnsDomain }}/127.0.0.1#10053\n            - --server=/in-addr.arpa/127.0.0.1#10053\n            - --server=/ip6.arpa/127.0.0.1#10053\n          livenessProbe:\n            failureThreshold: 5\n            httpGet:\n              path: /healthcheck/dnsmasq\n              port: 10054\n              scheme: HTTP\n            initialDelaySeconds: 60\n            periodSeconds: 10\n            successThreshold: 1\n            timeoutSeconds: 5\n          ports:\n            - containerPort: 53\n              name: dns\n              protocol: UDP\n            - containerPort: 53\n              name: dns-tcp\n              protocol: TCP\n          resources:\n            requests:\n              cpu: 150m\n              memory: 20Mi\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - mountPath: /etc/k8s/dns/dnsmasq-nanny\n              name: kube-dns-config\n        - name: sidecar\n{{ tuple $envAll \"kube_dns_sidecar\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          args:\n            - --v=2\n            - --logtostderr\n            - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ .Values.networking.dnsDomain }},5,A\n            - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ .Values.networking.dnsDomain }},5,A\n          livenessProbe:\n            failureThreshold: 5\n            
httpGet:\n              path: /metrics\n              port: 10054\n              scheme: HTTP\n            initialDelaySeconds: 60\n            periodSeconds: 10\n            successThreshold: 1\n            timeoutSeconds: 5\n          ports:\n            - containerPort: 10054\n              name: metrics\n              protocol: TCP\n          resources:\n            requests:\n              cpu: 10m\n              memory: 20Mi\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n      dnsPolicy: {{ .Values.pod.dns_policy }}\n      restartPolicy: Always\n      schedulerName: default-scheduler\n      securityContext: {}\n      serviceAccount: kube-dns\n      serviceAccountName: kube-dns\n      terminationGracePeriodSeconds: 30\n      tolerations:\n      - key: CriticalAddonsOnly\n        operator: Exists\n      - effect: NoSchedule\n        key: node-role.kubernetes.io/master\n      - effect: NoSchedule\n        key: node-role.kubernetes.io/control-plane\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - configMap:\n            defaultMode: 420\n            name: kube-dns\n            optional: true\n          name: kube-dns-config\n{{- end }}\n"
  },
  {
    "path": "kube-dns/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "kube-dns/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"kube-dns\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "kube-dns/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "kube-dns/templates/service-kube-dns.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_kube_dns }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  labels:\n    k8s-app: kube-dns\n    kubernetes.io/cluster-service: \"true\"\n    kubernetes.io/name: KubeDNS\n{{ tuple $envAll \"kubernetes\" \"dns\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  name: kube-dns\nspec:\n  type: ClusterIP\n  clusterIP: {{ .Values.networking.dnsIP }}\n  sessionAffinity: None\n  ports:\n    - name: dns\n      port: 53\n      protocol: UDP\n      targetPort: 53\n    - name: dns-tcp\n      port: 53\n      protocol: TCP\n      targetPort: 53\n  selector:\n    k8s-app: kube-dns\n{{ tuple $envAll \"kubernetes\" \"dns\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "kube-dns/templates/serviceaccount-kube-dns.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.serviceaccount_kube_dns }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: kube-dns\n  labels:\n    kubernetes.io/cluster-service: \"true\"\n    addonmanager.kubernetes.io/mode: Reconcile\n{{- if $envAll.Values.manifests.secret_registry }}\n{{- if $envAll.Values.endpoints.oci_image_registry.auth.enabled }}\nimagePullSecrets:\n  - name: {{ index $envAll.Values.secrets.oci_image_registry $envAll.Chart.Name }}\n{{- end -}}\n{{- end -}}\n{{- end }}\n"
  },
  {
    "path": "kube-dns/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# https://raw.githubusercontent.com/coreos/flannel/v0.8.0/Documentation/kube-flannel.yml\n\n---\nlabels:\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    kube_dns: registry.k8s.io/k8s-dns-kube-dns-amd64:1.14.5\n    kube_dns_nanny: registry.k8s.io/k8s-dns-dnsmasq-nanny-amd64:1.14.5\n    kube_dns_sidecar: registry.k8s.io/k8s-dns-sidecar-amd64:1.14.5\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\npod:\n  dns_policy: \"Default\"\n  resources:\n    enabled: false\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nnetworking:\n  dnsDomain: cluster.local\n  dnsIP: 10.96.0.10\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - kube-dns-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    kube_dns:\n      services: null\n\nsecrets:\n  oci_image_registry:\n    kube-dns: 
kube-dns-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      kube-dns:\n        username: kube-dns\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n\nmanifests:\n  configmap_bin: true\n  configmap_kube_dns: true\n  deployment_kube_dns: true\n  job_image_repo_sync: true\n  secret_registry: true\n  service_kube_dns: true\n  serviceaccount_kube_dns: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "kubernetes-keystone-webhook/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v0.2.0\ndescription: OpenStack-Helm Kubernetes keystone webhook\nname: kubernetes-keystone-webhook\nversion: 2025.2.0\nhome: https://github.com/kubernetes/cloud-provider-openstack\nsources:\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "kubernetes-keystone-webhook/templates/bin/_kubernetes-keystone-webhook-test.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nTOKEN=\"$(openstack token issue -f value -c id)\"\ncat << EOF | curl -kvs -XPOST -d @- \"${WEBHOOK_URL}\" | python -mjson.tool\n{\n    \"apiVersion\": \"authentication.k8s.io/v1beta1\",\n    \"kind\": \"TokenReview\",\n    \"metadata\": {\n        \"creationTimestamp\": null\n    },\n    \"spec\": {\n        \"token\": \"$TOKEN\"\n    }\n}\nEOF\n"
  },
  {
    "path": "kubernetes-keystone-webhook/templates/bin/_start.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -xe\n\nexec /bin/k8s-keystone-auth \\\n  --tls-cert-file /opt/kubernetes-keystone-webhook/pki/tls.crt \\\n  --tls-private-key-file /opt/kubernetes-keystone-webhook/pki/tls.key \\\n  --keystone-policy-file /etc/kubernetes-keystone-webhook/policy.json \\\n  --keystone-url {{ tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\n"
  },
  {
    "path": "kubernetes-keystone-webhook/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . -}}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: kubernetes-keystone-webhook-bin\ndata:\n  start.sh: |\n{{ tuple \"bin/_start.sh.tpl\" $envAll | include \"helm-toolkit.utils.template\" | indent 4 }}\n  kubernetes-keystone-webhook-test.sh: |\n{{ tuple \"bin/_kubernetes-keystone-webhook-test.sh.tpl\" $envAll | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "kubernetes-keystone-webhook/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . -}}\n\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: kubernetes-keystone-webhook-etc\ndata:\n  policy.json: |\n{{ toPrettyJson $envAll.Values.conf.policy | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "kubernetes-keystone-webhook/templates/deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment }}\n{{- $envAll := . }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: kubernetes-keystone-webhook\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"kubernetes-keystone-webhook\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ $envAll.Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"kubernetes-keystone-webhook\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"kubernetes-keystone-webhook\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"kubernetes-keystone-webhook\" \"containerNames\" (list \"kubernetes-keystone-webhook\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"kubernetes_keystone_webhook\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      containers:\n        - name: kubernetes-keystone-webhook\n{{ tuple $envAll \"kubernetes_keystone_webhook\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"kubernetes_keystone_webhook\" \"container\" \"kubernetes_keystone_webhook\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/start.sh\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"kubernetes_keystone_webhook\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 15\n            periodSeconds: 10\n          ports:\n            - name: k8sksauth-pub\n              containerPort: {{ tuple \"kubernetes_keystone_webhook\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etc-kubernetes-keystone-webhook\n              mountPath: /etc/kubernetes-keystone-webhook\n            - name: key-kubernetes-keystone-webhook\n              mountPath: /opt/kubernetes-keystone-webhook/pki/tls.crt\n              subPath: tls.crt\n              readOnly: true\n            - name: key-kubernetes-keystone-webhook\n              mountPath: /opt/kubernetes-keystone-webhook/pki/tls.key\n             
 subPath: tls.key\n              readOnly: true\n            - name: kubernetes-keystone-webhook-etc\n              mountPath: /etc/kubernetes-keystone-webhook/policy.json\n              subPath: policy.json\n              readOnly: true\n            - name: kubernetes-keystone-webhook-bin\n              mountPath: /tmp/start.sh\n              subPath: start.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: etc-kubernetes-keystone-webhook\n          emptyDir: {}\n        - name: key-kubernetes-keystone-webhook\n          secret:\n            secretName: {{ $envAll.Values.secrets.certificates.api }}\n            defaultMode: 0444\n        - name: kubernetes-keystone-webhook-etc\n          configMap:\n            name: kubernetes-keystone-webhook-etc\n            defaultMode: 0444\n        - name: kubernetes-keystone-webhook-bin\n          configMap:\n            name: kubernetes-keystone-webhook-bin\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "kubernetes-keystone-webhook/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "kubernetes-keystone-webhook/templates/ingress.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_webhook .Values.network.api.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendService\" \"api\" \"backendServiceType\" \"kubernetes_keystone_webhook\" \"backendPort\" \"k8sksauth-pub\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "kubernetes-keystone-webhook/templates/pod-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pod_test }}\n{{- $envAll := . }}\n\n{{- $mounts_kubernetes_keystone_webhook_tests := $envAll.Values.pod.mounts.kubernetes_keystone_webhook_tests.kubernetes_keystone_webhook_tests }}\n{{- $mounts_kubernetes_keystone_webhook_tests_init := $envAll.Values.pod.mounts.kubernetes_keystone_webhook_tests.init_container }}\n\n{{- $serviceAccountName := print $envAll.Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{ $envAll.Release.Name }}-test\"\n  annotations:\n    \"helm.sh/hook\": test-success\nspec:\n  serviceAccountName: {{ $serviceAccountName }}\n  nodeSelector:\n    {{ $envAll.Values.labels.test.node_selector_key }}: {{ $envAll.Values.labels.test.node_selector_value | quote }}\n  restartPolicy: Never\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_kubernetes_keystone_webhook_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: {{ $envAll.Release.Name }}-kubernetes-keystone-webhook-test\n{{ tuple $envAll \"scripted_test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      env:\n        - name: WEBHOOK_URL\n          
value: {{ tuple \"kubernetes_keystone_webhook\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | quote }}\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n      command:\n        - /tmp/kubernetes-keystone-webhook-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: kubernetes-keystone-webhook-bin\n          mountPath: /tmp/kubernetes-keystone-webhook-test.sh\n          subPath: kubernetes-keystone-webhook-test.sh\n          readOnly: true\n{{ if $mounts_kubernetes_keystone_webhook_tests.volumeMounts }}{{ toYaml $mounts_kubernetes_keystone_webhook_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: kubernetes-keystone-webhook-bin\n      configMap:\n        name: kubernetes-keystone-webhook-bin\n        defaultMode: 0555\n{{ if $mounts_kubernetes_keystone_webhook_tests.volumes }}{{ toYaml $mounts_kubernetes_keystone_webhook_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "kubernetes-keystone-webhook/templates/secret-certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_certificates }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $envAll.Values.secrets.certificates.api }}\ntype: kubernetes.io/tls\ndata:\n  tls.crt: {{ $envAll.Values.endpoints.kubernetes.auth.api.tls.crt | default \"\" | b64enc }}\n  tls.key: {{ $envAll.Values.endpoints.kubernetes.auth.api.tls.key | default \"\" | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "kubernetes-keystone-webhook/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "kubernetes-keystone-webhook/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "kubernetes-keystone-webhook/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendService\" \"api\" \"backendServiceType\" \"kubernetes_keystone_webhook\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "kubernetes-keystone-webhook/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"kubernetes_keystone_webhook\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: k8sksauth-pub\n      port: {{ tuple \"kubernetes_keystone_webhook\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"kubernetes-keystone-webhook\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "kubernetes-keystone-webhook/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    kubernetes_keystone_webhook: docker.io/k8scloudprovider/k8s-keystone-auth:v1.19.0\n    scripted_test: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n        nginx.ingress.kubernetes.io/secure-backends: \"true\"\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30601\n\npod:\n  security_context:\n    kubernetes_keystone_webhook:\n      pod:\n        runAsUser: 65534\n      container:\n        kubernetes_keystone_webhook:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        
default: kubernetes.io/hostname\n      weight:\n        default: 10\n  replicas:\n    api: 1\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"256Mi\"\n        cpu: \"200m\"\n    jobs:\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"256Mi\"\n          cpu: \"200m\"\n  mounts:\n    kubernetes_keystone_webhook_api:\n      init_container: null\n      kubernetes_keystone_webhook_api: null\n    kubernetes_keystone_webhook_tests:\n      init_container: null\n      kubernetes_keystone_webhook_tests: null\n\nrelease_group: null\n\nconf:\n  policy:\n    - resource:\n        verbs:\n          - \"*\"\n        resources:\n          - \"*\"\n        namespace: \"*\"\n        version: \"*\"\n      match:\n        - type: role\n          values:\n            - admin\n    - resource:\n        verbs:\n          - \"*\"\n        resources:\n          - \"*\"\n        namespace: \"kube-system\"\n        version: \"*\"\n      match:\n        - type: role\n          values:\n            - kube-system-admin\n    - resource:\n        verbs:\n          - get\n          - list\n          - watch\n        resources:\n          - \"*\"\n        namespace: \"kube-system\"\n        version: \"*\"\n      match:\n        - type: role\n          values:\n            - kube-system-viewer\n    - resource:\n        verbs:\n          - \"*\"\n        resources:\n          - \"*\"\n        namespace: \"openstack\"\n        version: \"*\"\n      match:\n        - type: project\n          values:\n            - openstack-system\n    - resource:\n        verbs:\n          - \"*\"\n        resources:\n          - \"*\"\n        namespace: \"*\"\n        version: \"*\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster\n    - nonresource:\n        verbs:\n          - \"*\"\n        path: \"*\"\n      
match:\n        - type: role\n          values:\n            - admin_k8cluster\n    - resource:\n        resources:\n          - pods\n          - pods/attach\n          - pods/exec\n          - pods/portforward\n          - pods/proxy\n          - configmaps\n          - endpoints\n          - persistentvolumeclaims\n          - replicationcontrollers\n          - replicationcontrollers/scale\n          - secrets\n          - serviceaccounts\n          - services\n          - services/proxy\n        verbs:\n          - create\n          - delete\n          - deletecollection\n          - get\n          - list\n          - patch\n          - update\n          - watch\n        namespace: \"*\"\n        version: \"\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_editor\n    - resource:\n        resources:\n          - bindings\n          - events\n          - limitranges\n          - namespaces/status\n          - pods/log\n          - pods/status\n          - replicationcontrollers/status\n          - resourcequotas\n          - resourcequotas/status\n          - namespaces\n        verbs:\n          - get\n          - list\n          - watch\n        namespace: \"*\"\n        version: \"\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_editor\n    - resource:\n        resources:\n          - serviceaccounts\n        verbs:\n          - impersonate\n        namespace: \"*\"\n        version: \"\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_editor\n    - resource:\n        resources:\n          - daemonsets\n          - deployments\n          - deployments/rollback\n          - deployments/scale\n          - replicasets\n          - replicasets/scale\n          - statefulsets\n        verbs:\n          - create\n          - delete\n          - deletecollection\n          - get\n          - list\n          - patch\n          - update\n          - 
watch\n        namespace: \"*\"\n        version: \"apps\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_editor\n    - resource:\n        resources:\n          - horizontalpodautoscalers\n        verbs:\n          - create\n          - delete\n          - deletecollection\n          - get\n          - list\n          - patch\n          - update\n          - watch\n        namespace: \"*\"\n        version: \"autoscaling\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_editor\n    - resource:\n        resources:\n          - cronjobs\n          - jobs\n        verbs:\n          - create\n          - delete\n          - deletecollection\n          - get\n          - list\n          - patch\n          - update\n          - watch\n        namespace: \"*\"\n        version: \"batch\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_editor\n    - resource:\n        resources:\n          - daemonsets\n          - deployments\n          - deployments/rollback\n          - deployments/scale\n          - ingresses\n          - networkpolicies\n          - replicasets\n          - replicasets/scale\n          - replicationcontrollers/scale\n        verbs:\n          - create\n          - delete\n          - deletecollection\n          - get\n          - list\n          - patch\n          - update\n          - watch\n        namespace: \"*\"\n        version: \"extensions\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_editor\n    - resource:\n        resources:\n          - poddisruptionbudgets\n        verbs:\n          - create\n          - delete\n          - deletecollection\n          - get\n          - list\n          - patch\n          - update\n          - watch\n        namespace: \"*\"\n        version: \"policy\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_editor\n    - 
resource:\n        resources:\n          - networkpolicies\n        verbs:\n          - create\n          - delete\n          - deletecollection\n          - get\n          - list\n          - patch\n          - update\n          - watch\n        namespace: \"*\"\n        version: \"networking.k8s.io\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_editor\n    - resource:\n        resources:\n          - configmaps\n          - endpoints\n          - persistentvolumeclaims\n          - pods\n          - replicationcontrollers\n          - replicationcontrollers/scale\n          - serviceaccounts\n          - services\n          - bindings\n          - events\n          - limitranges\n          - namespaces/status\n          - pods/log\n          - pods/status\n          - replicationcontrollers/status\n          - resourcequotas\n          - resourcequotas/status\n          - namespaces\n        verbs:\n          - get\n          - list\n          - watch\n        namespace: \"*\"\n        version: \"\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_viewer\n    - resource:\n        resources:\n          - daemonsets\n          - deployments\n          - deployments/scale\n          - replicasets\n          - replicasets/scale\n          - statefulsets\n        verbs:\n          - get\n          - list\n          - watch\n        namespace: \"*\"\n        version: \"apps\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_viewer\n    - resource:\n        resources:\n          - horizontalpodautoscalers\n        verbs:\n          - get\n          - list\n          - watch\n        namespace: \"*\"\n        version: \"autoscaling\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_viewer\n    - resource:\n        resources:\n          - cronjobs\n          - jobs\n        verbs:\n          - get\n          - list\n       
   - watch\n        namespace: \"*\"\n        version: \"batch\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_viewer\n    - resource:\n        resources:\n          - daemonsets\n          - deployments\n          - deployments/scale\n          - ingresses\n          - networkpolicies\n          - replicasets\n          - replicasets/scale\n          - replicationcontrollers/scale\n        verbs:\n          - get\n          - list\n          - watch\n        namespace: \"*\"\n        version: \"extensions\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_viewer\n    - resource:\n        resources:\n          - poddisruptionbudgets\n        verbs:\n          - get\n          - list\n          - watch\n        namespace: \"*\"\n        version: \"policy\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_viewer\n    - resource:\n        resources:\n          - networkpolicies\n        verbs:\n          - get\n          - list\n          - watch\n        namespace: \"*\"\n        version: \"networking.k8s.io\"\n      match:\n        - type: role\n          values:\n            - admin_k8cluster_viewer\n\nsecrets:\n  identity:\n    admin: kubernetes-keystone-webhook-admin\n  certificates:\n    api: kubernetes-keystone-webhook-certs\n  oci_image_registry:\n    kubernetes-keystone-webhook: kubernetes-keystone-webhook-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      kubernetes-keystone-webhook:\n        username: kubernetes-keystone-webhook\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  kubernetes:\n    auth:\n      api:\n        tls:\n          crt: null\n          key: null\n  
identity:\n    name: keystone\n    namespace: null\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  kubernetes_keystone_webhook:\n    namespace: null\n    name: k8sksauth\n    hosts:\n      default: k8sksauth-api\n      public: k8sksauth\n    host_fqdn_override:\n      default: null\n    path:\n      default: /webhook\n    scheme:\n      default: https\n    port:\n      api:\n        default: 8443\n        public: 443\n\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - k8sksauth-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs: null\n      services: null\n\nmanifests:\n  api_secret: true\n  configmap_etc: true\n  configmap_bin: true\n  deployment: true\n  ingress_webhook: true\n  pod_test: true\n  secret_certificates: true\n  secret_keystone: true\n  secret_registry: true\n  service_ingress_api: true\n  service: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  
#     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "kubernetes-node-problem-detector/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Kubernetes Node Problem Detector\nname: kubernetes-node-problem-detector\nversion: 2025.2.0\nhome: https://github.com/kubernetes/node-problem-detector\nsources:\n  - https://github.com/kubernetes/node-problem-detector\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "kubernetes-node-problem-detector/templates/bin/_node-problem-detector.sh.tpl",
    "content": "#!/bin/sh\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec /opt/node-problem-detector/bin/node-problem-detector \\\n  {{- range $monitor, $monitorConfig := .Values.conf.monitors }}\n  {{- if $monitorConfig.enabled }}\n  --config.{{$monitor}}={{ include \"helm-toolkit.utils.joinListWithComma\" $monitorConfig.enabled }} \\\n  {{- end }}\n  {{- end }}\n  --logtostderr \\\n  --prometheus-address=0.0.0.0\n"
  },
  {
    "path": "kubernetes-node-problem-detector/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: node-problem-detector-bin\ndata:\n  node-problem-detector.sh: |\n{{ tuple \"bin/_node-problem-detector.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- range $monitor, $monitorConfig := $envAll.Values.conf.monitors }}\n{{- $scripts := $monitorConfig.scripts }}\n{{- range $script, $scriptSource := $scripts.source }}\n{{- if has $script $scripts.enabled }}\n  {{$script}}: |\n{{$scriptSource | indent 4 -}}\n{{- end }}\n{{- end -}}\n{{- end -}}\n{{- end }}\n"
  },
  {
    "path": "kubernetes-node-problem-detector/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: node-problem-detector-etc\ntype: Opaque\ndata:\n{{- range $monitor, $monitorConfig := $envAll.Values.conf.monitors }}\n{{- $plugins := $monitorConfig.config }}\n{{- range $plugin, $config := $plugins }}\n  {{$plugin}}.json: {{ toJson $config | b64enc }}\n{{- end }}\n{{ end }}\n{{- end }}\n"
  },
  {
    "path": "kubernetes-node-problem-detector/templates/daemonset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.daemonset }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"node-problem-detector\" }}\n{{ tuple $envAll \"node_problem_detector\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: run-node-problem-detector\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: cluster-admin\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: node-problem-detector\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"node_problem_detector\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"node_problem_detector\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"node_problem_detector\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"node_problem_detector\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{- if 
.Values.monitoring.prometheus.pod.enabled }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.node_problem_detector }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_pod_annotations\" | indent 8 }}\n{{- end }}\n{{ dict \"envAll\" $envAll \"podName\" \"node-problem-detector\" \"containerNames\" (list \"node-problem-detector\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"node_problem_detector\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if .Values.pod.tolerations.node_problem_detector.enabled }}\n{{ tuple $envAll \"node_problem_detector\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ else }}\n      nodeSelector:\n        {{ .Values.labels.node_problem_detector.node_selector_key }}: {{ .Values.labels.node_problem_detector.node_selector_value | quote }}\n{{ end }}\n      containers:\n        - name: node-problem-detector\n{{ tuple $envAll \"node_problem_detector\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.node_problem_detector | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"node_problem_detector\" \"container\" \"node_problem_detector\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/node-problem-detector.sh\n          ports:\n            - name: metrics\n              containerPort: {{ tuple \"node_problem_detector\" \"internal\" \"metrics\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          env:\n          - name: NODE_NAME\n            valueFrom:\n              fieldRef:\n                fieldPath: spec.nodeName\n          volumeMounts:\n            - name: log\n              mountPath: /var/log\n              readOnly: true\n            - name: kmsg\n              mountPath: /dev/kmsg\n              readOnly: true\n            - name: localtime\n              mountPath: /etc/localtime\n              readOnly: true\n            - name: node-problem-detector-bin\n              mountPath: /tmp/node-problem-detector.sh\n              subPath: node-problem-detector.sh\n              readOnly: true\n            {{- range $monitor, $monitorConfig := $envAll.Values.conf.monitors }}\n            {{- $scripts := $monitorConfig.scripts }}\n            {{- range $script, $scriptSource := $scripts.source }}\n            {{- if has $script $scripts.enabled }}\n            - name: node-problem-detector-bin\n              mountPath: /config/plugin/{{$script}}\n              subPath: {{$script}}\n            {{- end }}\n            {{- end }}\n            {{- end }}\n            {{- range $monitor, $monitorConfig := $envAll.Values.conf.monitors }}\n            {{- $plugins := $monitorConfig.config }}\n            {{- range $plugin, $config := $plugins }}\n            - name: node-problem-detector-etc\n              mountPath: /config/{{$plugin}}.json\n              subPath: {{$plugin}}.json\n            {{- end }}\n            {{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: log\n          hostPath:\n            path: /var/log\n        - name: kmsg\n          hostPath:\n            path: /dev/kmsg\n        - name: localtime\n          hostPath:\n            path: /etc/localtime\n        - name: node-problem-detector-etc\n          secret:\n            secretName: node-problem-detector-etc\n            defaultMode: 292\n        - name: 
node-problem-detector-bin\n          configMap:\n            name: node-problem-detector-bin\n            defaultMode: 365\n{{- end }}\n"
  },
  {
    "path": "kubernetes-node-problem-detector/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "kubernetes-node-problem-detector/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"node-problem-detector\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "kubernetes-node-problem-detector/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "kubernetes-node-problem-detector/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.node_problem_detector }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"node_problem_detector\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  labels:\n{{ tuple $envAll \"node_problem_detector\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{- if .Values.monitoring.prometheus.service.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n{{- end }}\nspec:\n  type: ClusterIP\n  clusterIP: None\n  ports:\n  - name: metrics\n    port: {{ tuple \"node_problem_detector\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    targetPort: {{ tuple \"node_problem_detector\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"node_problem_detector\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "kubernetes-node-problem-detector/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for node-problem-detector.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nimages:\n  tags:\n    node_problem_detector: quay.io/airshipit/node-problem-detector:latest-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  node_problem_detector:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nsecrets:\n  oci_image_registry:\n    kubernetes-node-problem-detector: kubernetes-node-problem-detector-oci-image-registry-key\n\npod:\n  security_context:\n    node_problem_detector:\n      pod:\n        runAsUser: 0\n      container:\n        node_problem_detector:\n          readOnlyRootFilesystem: true\n          privileged: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n  mounts:\n    node_problem_detector:\n      node_problem_detector:\n      init_container: null\n  lifecycle:\n    upgrades:\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        
node_problem_detector:\n          enabled: true\n          min_ready_seconds: 0\n      revision_history: 3\n      pod_replacement_strategy: RollingUpdate\n      rolling_update:\n        max_unavailable: 1\n        max_surge: 3\n    termination_grace_period:\n      node_problem_detector:\n        timeout: 30\n  resources:\n    enabled: false\n    node_problem_detector:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  tolerations:\n    node_problem_detector:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n      - key: node-role.kubernetes.io/node\n        operator: Exists\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - node-problem-detector-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    node_problem_detector:\n      services: null\n\nmonitoring:\n  prometheus:\n    pod:\n      enabled: true\n    service:\n      enabled: false\n    node_problem_detector:\n      scrape: true\n      port: 20257\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      
kubernetes-node-problem-detector:\n        username: kubernetes-node-problem-detector\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  node_problem_detector:\n    name: node-problem-detector\n    namespace: null\n    hosts:\n      default: node-problem-detector\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    port:\n      metrics:\n        default: 20257\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  daemonset: true\n  job_image_repo_sync: true\n  secret_registry: true\n  service: false\n\nconf:\n  monitors:\n    system-log-monitor:\n      enabled:\n        - /config/kernel-monitor.json\n        - /config/docker-monitor.json\n        - /config/systemd-monitor.json\n      scripts:\n        enabled: null\n        source: null\n      config:\n        kernel-monitor:\n          plugin: kmsg\n          logPath: \"/dev/kmsg\"\n          lookback: 5m\n          bufferSize: 10\n          source: kernel-monitor\n          conditions:\n          - type: KernelDeadlock\n            reason: KernelHasNoDeadlock\n            message: kernel has no deadlock\n          - type: ReadonlyFilesystem\n            reason: FilesystemIsNotReadOnly\n            message: Filesystem is not read-only\n          rules:\n          - type: temporary\n            reason: OOMKilling\n            pattern: Kill process \\d+ (.+) score \\d+ or sacrifice child\\nKilled process \\d+\n              (.+) total-vm:\\d+kB, anon-rss:\\d+kB, file-rss:\\d+kB.*\n          - type: temporary\n            reason: TaskHung\n            pattern: task \\S+:\\w+ blocked for more than \\w+ seconds\\.\n          - type: temporary\n            reason: UnregisterNetDevice\n            pattern: 'unregister_netdevice: waiting for \\w+ to become free. 
Usage count = \\d+'\n          - type: temporary\n            reason: KernelOops\n            pattern: 'BUG: unable to handle kernel NULL pointer dereference at .*'\n          - type: temporary\n            reason: KernelOops\n            pattern: 'divide error: 0000 \\[#\\d+\\] SMP'\n          - type: permanent\n            condition: KernelDeadlock\n            reason: AUFSUmountHung\n            pattern: task umount\\.aufs:\\w+ blocked for more than \\w+ seconds\\.\n          - type: permanent\n            condition: KernelDeadlock\n            reason: DockerHung\n            pattern: task docker:\\w+ blocked for more than \\w+ seconds\\.\n          - type: permanent\n            condition: ReadonlyFilesystem\n            reason: FilesystemIsReadOnly\n            pattern: Remounting filesystem read-only\n        kernel-monitor-filelog:\n          plugin: filelog\n          pluginConfig:\n            timestamp: \"^.{15}\"\n            message: 'kernel: \\[.*\\] (.*)'\n            timestampFormat: Jan _2 15:04:05\n          logPath: \"/var/log/kern.log\"\n          lookback: 5m\n          bufferSize: 10\n          source: kernel-monitor\n          conditions:\n          - type: KernelDeadlock\n            reason: KernelHasNoDeadlock\n            message: kernel has no deadlock\n          rules:\n          - type: temporary\n            reason: OOMKilling\n            pattern: Kill process \\d+ (.+) score \\d+ or sacrifice child\\nKilled process \\d+\n              (.+) total-vm:\\d+kB, anon-rss:\\d+kB, file-rss:\\d+kB.*\n          - type: temporary\n            reason: TaskHung\n            pattern: task \\S+:\\w+ blocked for more than \\w+ seconds\\.\n          - type: temporary\n            reason: UnregisterNetDevice\n            pattern: 'unregister_netdevice: waiting for \\w+ to become free. 
Usage count = \\d+'\n          - type: temporary\n            reason: KernelOops\n            pattern: 'BUG: unable to handle kernel NULL pointer dereference at .*'\n          - type: temporary\n            reason: KernelOops\n            pattern: 'divide error: 0000 \\[#\\d+\\] SMP'\n          - type: permanent\n            condition: KernelDeadlock\n            reason: AUFSUmountHung\n            pattern: task umount\\.aufs:\\w+ blocked for more than \\w+ seconds\\.\n          - type: permanent\n            condition: KernelDeadlock\n            reason: DockerHung\n            pattern: task docker:\\w+ blocked for more than \\w+ seconds\\.\n        kernel-monitor-counter:\n          plugin: custom\n          pluginConfig:\n            invoke_interval: 5m\n            timeout: 1m\n            max_output_length: 80\n            concurrency: 1\n          source: kernel-monitor\n          conditions:\n          - type: FrequentUnregisterNetDevice\n            reason: NoFrequentUnregisterNetDevice\n            message: node is functioning properly\n          rules:\n          - type: permanent\n            condition: FrequentUnregisterNetDevice\n            reason: UnregisterNetDevice\n            path: \"/home/kubernetes/bin/log-counter\"\n            args:\n            - \"--journald-source=kernel\"\n            - \"--log-path=/var/log/journal\"\n            - \"--lookback=20m\"\n            - \"--count=3\"\n            - \"--pattern=unregister_netdevice: waiting for \\\\w+ to become free. 
Usage count\n              = \\\\d+\"\n            timeout: 1m\n        docker-monitor:\n          plugin: journald\n          pluginConfig:\n            source: dockerd\n          logPath: \"/var/log/journal\"\n          lookback: 5m\n          bufferSize: 10\n          source: docker-monitor\n          conditions: []\n          rules:\n          - type: temporary\n            reason: CorruptDockerImage\n            pattern: 'Error trying v2 registry: failed to register layer: rename /var/lib/docker/image/(.+)\n              /var/lib/docker/image/(.+): directory not empty.*'\n        docker-monitor-filelog:\n          plugin: filelog\n          pluginConfig:\n            timestamp: ^time=\"(\\S*)\"\n            message: |-\n              msg=\"([^\n              ]*)\"\n            timestampFormat: '2006-01-02T15:04:05.999999999-07:00'\n          logPath: \"/var/log/docker.log\"\n          lookback: 5m\n          bufferSize: 10\n          source: docker-monitor\n          conditions: []\n          rules:\n          - type: temporary\n            reason: CorruptDockerImage\n            pattern: 'Error trying v2 registry: failed to register layer: rename /var/lib/docker/image/(.+)\n              /var/lib/docker/image/(.+): directory not empty.*'\n        docker-monitor-counter:\n          plugin: custom\n          pluginConfig:\n            invoke_interval: 5m\n            timeout: 1m\n            max_output_length: 80\n            concurrency: 1\n          source: docker-monitor\n          conditions:\n          - type: CorruptDockerOverlay2\n            reason: NoCorruptDockerOverlay2\n            message: docker overlay2 is functioning properly\n          rules:\n          - type: permanent\n            condition: CorruptDockerOverlay2\n            reason: CorruptDockerOverlay2\n            path: \"/home/kubernetes/bin/log-counter\"\n            args:\n            - \"--journald-source=dockerd\"\n            - \"--log-path=/var/log/journal\"\n            - 
\"--lookback=5m\"\n            - \"--count=10\"\n            - \"--pattern=returned error: readlink /var/lib/docker/overlay2.*: invalid argument.*\"\n            timeout: 1m\n        systemd-monitor:\n          plugin: journald\n          pluginConfig:\n            source: systemd\n          logPath: \"/var/log/journal\"\n          lookback: 5m\n          bufferSize: 10\n          source: systemd-monitor\n          conditions: []\n          rules:\n          - type: temporary\n            reason: KubeletStart\n            pattern: Started Kubernetes kubelet.\n          - type: temporary\n            reason: DockerStart\n            pattern: Starting Docker Application Container Engine...\n          - type: temporary\n            reason: ContainerdStart\n            pattern: Starting containerd container runtime...\n        systemd-monitor-counter:\n          plugin: custom\n          pluginConfig:\n            invoke_interval: 5m\n            timeout: 1m\n            max_output_length: 80\n            concurrency: 1\n          source: systemd-monitor\n          conditions:\n          - type: FrequentKubeletRestart\n            reason: NoFrequentKubeletRestart\n            message: kubelet is functioning properly\n          - type: FrequentDockerRestart\n            reason: NoFrequentDockerRestart\n            message: docker is functioning properly\n          - type: FrequentContainerdRestart\n            reason: NoFrequentContainerdRestart\n            message: containerd is functioning properly\n          rules:\n          - type: permanent\n            condition: FrequentKubeletRestart\n            reason: FrequentKubeletRestart\n            path: \"/home/kubernetes/bin/log-counter\"\n            args:\n            - \"--journald-source=systemd\"\n            - \"--log-path=/var/log/journal\"\n            - \"--lookback=20m\"\n            - \"--delay=5m\"\n            - \"--count=5\"\n            - \"--pattern=Started Kubernetes kubelet.\"\n            timeout: 
1m\n          - type: permanent\n            condition: FrequentDockerRestart\n            reason: FrequentDockerRestart\n            path: \"/home/kubernetes/bin/log-counter\"\n            args:\n            - \"--journald-source=systemd\"\n            - \"--log-path=/var/log/journal\"\n            - \"--lookback=20m\"\n            - \"--count=5\"\n            - \"--pattern=Starting Docker Application Container Engine...\"\n            timeout: 1m\n          - type: permanent\n            condition: FrequentContainerdRestart\n            reason: FrequentContainerdRestart\n            path: \"/home/kubernetes/bin/log-counter\"\n            args:\n            - \"--journald-source=systemd\"\n            - \"--log-path=/var/log/journal\"\n            - \"--lookback=20m\"\n            - \"--count=5\"\n            - \"--pattern=Starting containerd container runtime...\"\n            timeout: 1m\n    custom-plugin-monitor:\n      enabled:\n        - /config/network-problem-monitor.json\n      scripts:\n        enabled:\n          - network_problem.sh\n        source:\n          network_problem.sh: |\n            #!/bin/bash\n\n            # This plugin checks for common network issues. 
Currently, it only checks\n            # if the conntrack table is 50% full.\n            set -eu\n            set -o pipefail\n\n            conntrack_threshold=$(($(cat /proc/sys/net/netfilter/nf_conntrack_max)/2 ))\n            conntrack_count=$(cat /proc/sys/net/netfilter/nf_conntrack_count)\n\n            if [ \"$conntrack_count\" -ge \"$conntrack_threshold\" ]; then\n              echo \"Conntrack table approaching full\"\n              exit 1\n            fi\n\n            exit 0\n      config:\n        network-problem-monitor:\n          plugin: custom\n          pluginConfig:\n            invoke_interval: 30s\n            timeout: 5s\n            max_output_length: 80\n            concurrency: 3\n          source: network-custom-plugin-monitor\n          conditions: []\n          rules:\n          - type: temporary\n            reason: ConntrackFull\n            path: \"./config/plugin/network_problem.sh\"\n            timeout: 3s\n    system-stats-monitor:\n      enabled:\n        - /config/system-stats-monitor.json\n      scripts:\n        enabled: null\n        source: null\n      config:\n        system-stats-monitor:\n          disk:\n            metricsConfigs:\n              disk/io_time:\n                displayName: disk/io_time\n              disk/weighted_io:\n                displayName: disk/weighted_io\n              disk/avg_queue_len:\n                displayName: disk/avg_queue_len\n            includeRootBlk: true\n            includeAllAttachedBlk: true\n            lsblkTimeout: 5s\n          invokeInterval: 60s\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  
#               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "ldap/.helmignore",
    "content": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation (prefixed with !). Only one pattern per line.\n.DS_Store\n# Common VCS dirs\n.git/\n.gitignore\n.bzr/\n.bzrignore\n.hg/\n.hgignore\n.svn/\n# Common backup files\n*.swp\n*.pyc\n*.bak\n*.tmp\n*~\n# Various IDEs\n.project\n.idea/\n*.tmproj\n"
  },
  {
    "path": "ldap/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.2.0\ndescription: OpenStack-Helm LDAP\nname: ldap\nversion: 2025.2.0\nhome: https://www.openldap.org/\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "ldap/templates/_helpers.tpl",
    "content": "{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{- define \"splitdomain\" -}}\n{{- $name := index . 0 -}}\n{{- $local := dict \"first\" true }}\n{{- range $k, $v := splitList \".\" $name }}{{- if not $local.first -}},{{- end -}}dc={{- $v -}}{{- $_ := set $local \"first\" false -}}{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "ldap/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\nset -xe\n\n{{- $url := tuple \"ldap\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n{{- $port := tuple \"ldap\" \"internal\" \"ldap\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\nLDAPHOST=\"{{ .Values.endpoints.ldap.scheme }}://{{ $url }}:{{ $port }}\"\nADMIN=\"cn={{ .Values.secrets.identity.admin }},{{ tuple .Values.openldap.domain . | include \"splitdomain\" }}\"\nPASSWORD=\"{{ .Values.openldap.password }}\"\n\n# Wait for LDAP server to be ready\nretries=0\nmax_retries=60\nuntil ldapsearch -x -H $LDAPHOST -b \"\" -s base \"(objectclass=*)\" namingContexts 2>/dev/null | grep -q namingContexts; do\n  retries=$((retries + 1))\n  if [ $retries -ge $max_retries ]; then\n    echo \"ERROR: LDAP server not reachable after $max_retries attempts\"\n    exit 1\n  fi\n  echo \"Waiting for LDAP server to be ready... ($retries/$max_retries)\"\n  sleep 5\ndone\n\nldapadd -x -c -D $ADMIN -H $LDAPHOST -w $PASSWORD -f /etc/sample_data.ldif\n"
  },
  {
    "path": "ldap/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.configmap_bin }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: ldap-bin\ndata:\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ldap/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: ldap-etc\ntype: Opaque\ndata:\n{{- if .Values.bootstrap.enabled }}\n  sample_data.ldif: {{ .Values.data.sample | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ldap/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "ldap/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"ldap\" \"configFile\" \"/etc/sample_data.ldif\" \"keystoneUser\" \"admin\" \"openrc\" \"false\" -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "ldap/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"ldap\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "ldap/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"ldap\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "ldap/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "ldap/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"ldap\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: ldap\n      port: {{ tuple \"ldap\" \"internal\" \"ldap\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"ldap\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "ldap/templates/statefulset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.statefulset }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ldap\" }}\n{{ tuple $envAll \"ldap\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: ldap\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ldap\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: {{ tuple \"ldap\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  replicas: {{ .Values.pod.replicas.server }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ldap\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ldap\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"ldap\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.server.node_selector_key }}: {{ .Values.labels.server.node_selector_value | quote }}\n      securityContext:\n        fsGroup: 1001\n      initContainers:\n{{ tuple $envAll \"ldap\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 6 }}\n      - name: ldap-perms\n{{ tuple $envAll \"ldap\" | include \"helm-toolkit.snippets.image\" | indent 8 }}\n        securityContext:\n          runAsUser: 0\n        command:\n          - chown\n          - -R\n          - \"1001:1001\"\n          - /openldap/data\n          - /openldap/slapd.d\n        volumeMounts:\n          - name: ldap-data\n            mountPath: /openldap/data\n          - name: ldap-config\n            mountPath: /openldap/slapd.d\n      containers:\n        - name: ldap\n{{ tuple $envAll \"ldap\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: LDAP_ROOT\n              value: {{ tuple .Values.openldap.domain . | include \"splitdomain\" }}\n            - name: LDAP_ADMIN_PASSWORD\n              value: {{ .Values.openldap.password }}\n            - name: LDAP_SKIP_DEFAULT_TREE\n              value: \"yes\"\n          ports:\n            - containerPort: {{ tuple \"ldap\" \"internal\" \"ldap\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"ldap\" \"internal\" \"ldap\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 5\n            periodSeconds: 10\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ldap-data\n              mountPath: /openldap/data\n            - name: ldap-config\n              mountPath: /openldap/slapd.d\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n{{- if not .Values.storage.pvc.enabled }}\n        - name: ldap-data\n          hostPath:\n            path: {{ .Values.storage.host.data_path }}\n        - name: ldap-config\n          hostPath:\n            path: {{ .Values.storage.host.config_path }}\n{{- else }}\n  volumeClaimTemplates:\n    - metadata:\n        name: ldap-data\n      spec:\n        accessModes: [\"ReadWriteOnce\"]\n        storageClassName: {{ .Values.storage.pvc.class_name }}\n        resources:\n          requests:\n            storage: {{ .Values.storage.pvc.size }}\n    - metadata:\n        name: ldap-config\n      spec:\n        accessModes: [\"ReadWriteOnce\"]\n        storageClassName: {{ .Values.storage.pvc.class_name }}\n        resources:\n          requests:\n            storage: {{ .Values.storage.pvc.size }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "ldap/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for ldap.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\npod:\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  replicas:\n    server: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n  resources:\n    enabled: false\n    server:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  mounts:\n    ldap_data_load:\n      init_container: null\n      ldap_data_load:\n\nimages:\n  tags:\n    bootstrap: \"symas/openldap:2.6.8-debian-12\"\n    ldap: \"symas/openldap:2.6.8-debian-12\"\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  
pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - ldap-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    ldap:\n      jobs: null\n    bootstrap:\n      services:\n        - endpoint: internal\n          service: ldap\n    server:\n      jobs:\n        - ldap-load-data\n      services:\n        - endpoint: internal\n          service: ldap\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nstorage:\n  pvc:\n    enabled: true\n    size: 2Gi\n    class_name: general\n  host:\n    data_path: /data/openstack-helm/ldap\n    config_path: /data/openstack-helm/config\n\nlabels:\n  server:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nbootstrap:\n  enabled: false\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      ldap:\n        username: ldap\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  ldap:\n    hosts:\n      default: ldap\n    host_fqdn_override:\n      default: null\n    path: null\n    scheme: 'ldap'\n    port:\n      ldap:\n        default: 1389\n\nnetwork_policy:\n  ldap:\n    ingress:\n      - {}\n    egress:\n      - 
{}\n\ndata:\n  sample: |\n    dn: dc=cluster,dc=local\n    objectClass: top\n    objectClass: dcObject\n    objectClass: organization\n    dc: cluster\n    o: cluster\n\n    dn: ou=People,dc=cluster,dc=local\n    objectclass: organizationalunit\n    ou: People\n    description: We the People\n\n    # NOTE: Password is \"password\" without quotes\n    dn: uid=alice,ou=People,dc=cluster,dc=local\n    objectClass: inetOrgPerson\n    objectClass: top\n    objectClass: posixAccount\n    objectClass: shadowAccount\n    objectClass: person\n    sn: Alice\n    cn: alice\n    uid: alice\n    userPassword: {SSHA}+i3t/DLCgLDGaIOAmfeFJ2kDeJWmPUDH\n    description: SHA\n    gidNumber: 1000\n    uidNumber: 1493\n    homeDirectory: /home/alice\n    mail: alice@example.com\n\n    # NOTE: Password is \"password\" without quotes\n    dn: uid=bob,ou=People,dc=cluster,dc=local\n    objectClass: inetOrgPerson\n    objectClass: top\n    objectClass: posixAccount\n    objectClass: shadowAccount\n    objectClass: person\n    sn: Bob\n    cn: bob\n    uid: bob\n    userPassword: {SSHA}fCJ5vuW1BQ4/OfOVkkx1qjwi7yHFuGNB\n    description: MD5\n    gidNumber: 1000\n    uidNumber: 5689\n    homeDirectory: /home/bob\n    mail: bob@example.com\n\n    dn: ou=Groups,dc=cluster,dc=local\n    objectclass: organizationalunit\n    ou: Groups\n    description: We the People\n\n    dn: cn=cryptography,ou=Groups,dc=cluster,dc=local\n    objectclass: top\n    objectclass: posixGroup\n    gidNumber: 418\n    cn: cryptography\n    description: Cryptography Team\n    memberUID: uid=alice,ou=People,dc=cluster,dc=local\n    memberUID: uid=bob,ou=People,dc=cluster,dc=local\n\n    dn: cn=blue,ou=Groups,dc=cluster,dc=local\n    objectclass: top\n    objectclass: posixGroup\n    gidNumber: 419\n    cn: blue\n    description: Blue Team\n    memberUID: uid=bob,ou=People,dc=cluster,dc=local\n\n    dn: cn=red,ou=Groups,dc=cluster,dc=local\n    objectclass: top\n    objectclass: posixGroup\n    gidNumber: 420\n    cn: 
red\n    description: Red Team\n    memberUID: uid=alice,ou=People,dc=cluster,dc=local\n\nsecrets:\n  identity:\n    admin: admin\n    ldap: ldap\n  oci_image_registry:\n    ldap: ldap-oci-image-registry-key\n\nopenldap:\n  domain: cluster.local\n  password: password\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  job_bootstrap: true\n  job_image_repo_sync: true\n  network_policy: false\n  secret_registry: true\n  statefulset: true\n  service: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "libvirt/.helmignore",
    "content": "values_overrides\n"
  },
  {
    "path": "libvirt/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm libvirt\nname: libvirt\nversion: 2025.2.0\nhome: https://libvirt.org\nsources:\n  - https://libvirt.org/git/?p=libvirt.git;a=summary\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "libvirt/releasenotes/notes/libvirt-339936ca478fbf50.yaml",
    "content": "---\nfeatures:\n  - |\n    Make exporter container args configurable in values to make it possible to use\n    ghcr.io/inovex/prometheus-libvirt-exporter image which assumes\n    having no additional args.\n...\n"
  },
  {
    "path": "libvirt/templates/bin/_ceph-admin-keyring.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncat > /etc/ceph/ceph.client.admin.keyring << EOF\n[client.admin]\n{{- if .Values.conf.ceph.admin_keyring }}\n    key = {{ .Values.conf.ceph.admin_keyring }}\n{{- else }}\n    key = $(cat /tmp/client-keyring)\n{{- end }}\nEOF\n\nexit 0\n"
  },
  {
    "path": "libvirt/templates/bin/_ceph-keyring.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncp -fv /etc/ceph/ceph.conf.template /etc/ceph/ceph.conf\n\nKEYRING=/etc/ceph/ceph.client.${CEPH_CINDER_USER}.keyring\n{{- if .Values.conf.ceph.cinder.keyring }}\ncat > ${KEYRING} <<EOF\n[client.{{ .Values.conf.ceph.cinder.user }}]\n    key = {{ .Values.conf.ceph.cinder.keyring }}\nEOF\n{{- else }}\nif ! [ \"x${CEPH_CINDER_USER}\" == \"xadmin\" ]; then\n  #\n  # If user is not client.admin, check if it already exists. If not create\n  # the user. If the cephx user does not exist make sure the caps are set\n  # according to best practices\n  #\n  if USERINFO=$(ceph auth get client.${CEPH_CINDER_USER}); then\n    echo \"Cephx user client.${CEPH_CINDER_USER} already exist\"\n    echo \"Update user client.${CEPH_CINDER_USER} caps\"\n    ceph auth caps client.${CEPH_CINDER_USER} \\\n       mon \"profile rbd\" \\\n       osd \"profile rbd\"\n    ceph auth get client.${CEPH_CINDER_USER} -o ${KEYRING}\n  else\n    echo \"Creating Cephx user client.${CEPH_CINDER_USER}\"\n    ceph auth get-or-create client.${CEPH_CINDER_USER} \\\n      mon \"profile rbd\" \\\n      osd \"profile rbd\" \\\n      -o ${KEYRING}\n  fi\n  rm -f /etc/ceph/ceph.client.admin.keyring\nfi\n{{- end }}\n"
  },
  {
    "path": "libvirt/templates/bin/_libvirt.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n# NOTE(mnaser): This will move the VNC certificates into the expected location.\nif [ -f /tmp/vnc.crt ]; then\n  mkdir -p /etc/pki/libvirt-vnc\n  cp /tmp/vnc.key /etc/pki/libvirt-vnc/server-key.pem\n  cp /tmp/vnc.crt /etc/pki/libvirt-vnc/server-cert.pem\n  cp /tmp/vnc-ca.crt /etc/pki/libvirt-vnc/ca-cert.pem\nfi\n\nif [ -n \"$(cat /proc/*/comm 2>/dev/null | grep -w libvirtd)\" ]; then\n  set +x\n  for proc in $(ls /proc/*/comm 2>/dev/null); do\n    if [ \"x$(cat $proc 2>/dev/null | grep -w libvirtd)\" == \"xlibvirtd\" ]; then\n      set -x\n      libvirtpid=$(echo $proc | cut -f 3 -d '/')\n      echo \"WARNING: libvirtd daemon already running on host\" 1>&2\n      echo \"$(cat \"/proc/${libvirtpid}/status\" 2>/dev/null | grep State)\" 1>&2\n      kill -9 \"$libvirtpid\" || true\n      set +x\n    fi\n  done\n  set -x\nfi\n\nrm -f /var/run/libvirtd.pid\n\nif [[ -c /dev/kvm ]]; then\n    chmod 660 /dev/kvm\n    chown root:kvm /dev/kvm\nfi\n\n#Setup Cgroups to use when breaking out of Kubernetes defined groups\nCGROUPS=\"\"\nfor CGROUP in {{ .Values.conf.kubernetes.cgroup_controllers | include \"helm-toolkit.utils.joinListWithSpace\"  }}; do\n  if [ -d /sys/fs/cgroup/${CGROUP} ] || grep -w $CGROUP /sys/fs/cgroup/cgroup.controllers; then\n    CGROUPS+=\"${CGROUP},\"\n  fi\ndone\ncgcreate -g ${CGROUPS%,}:/osh-libvirt\n\n# We assume that if hugepage count > 0, then hugepages should be 
exposed to libvirt/qemu\nhp_count=\"$(cat /proc/meminfo | grep HugePages_Total | tr -cd '[:digit:]')\"\nif [ 0\"$hp_count\" -gt 0 ]; then\n\n  echo \"INFO: Detected hugepage count of '$hp_count'. Enabling hugepage settings for libvirt/qemu.\"\n\n  # Enable KVM hugepages for QEMU\n  if [ -n \"$(grep KVM_HUGEPAGES=0 /etc/default/qemu-kvm)\" ]; then\n    sed -i 's/.*KVM_HUGEPAGES=0.*/KVM_HUGEPAGES=1/g' /etc/default/qemu-kvm\n  else\n    echo KVM_HUGEPAGES=1 >> /etc/default/qemu-kvm\n  fi\n\n  # Ensure that the hugepage mount location is available/mapped inside the\n  # container. This assumes use of the default ubuntu dev-hugepages.mount\n  # systemd unit which mounts hugepages at this location.\n  if [ ! -d /dev/hugepages ]; then\n    echo \"ERROR: Hugepages configured in kernel, but libvirtd container cannot access /dev/hugepages\"\n    exit 1\n  fi\nfi\n\nif [ -n \"${LIBVIRT_CEPH_CINDER_SECRET_UUID}\" ] || [ -n \"${LIBVIRT_EXTERNAL_CEPH_CINDER_SECRET_UUID}\" ] ; then\n\n  cgexec -g ${CGROUPS%,}:/osh-libvirt systemd-run --scope --slice=system libvirtd --listen &\n\n  tmpsecret=$(mktemp --suffix .xml)\n  if [ -n \"${LIBVIRT_EXTERNAL_CEPH_CINDER_SECRET_UUID}\" ] ; then\n    tmpsecret2=$(mktemp --suffix .xml)\n  fi\n  function cleanup {\n    rm -f \"${tmpsecret}\"\n    if [ -n \"${LIBVIRT_EXTERNAL_CEPH_CINDER_SECRET_UUID}\" ] ; then\n      rm -f \"${tmpsecret2}\"\n    fi\n  }\n  trap cleanup EXIT\n\n  # Wait until libvirtd is up (pid file present)\n  TIMEOUT=60\n  while [[ ! -f /var/run/libvirtd.pid ]]; do\n    if [[ ${TIMEOUT} -gt 0 ]]; then\n      let TIMEOUT-=1\n      sleep 1\n    else\n      echo \"ERROR: libvirt did not start in time (pid file missing)\"\n      exit 1\n    fi\n  done\n\n  # Even though the pid file exists, the socket may not be available yet;\n  # wait for it as well (virsh needs the socket)\n  TIMEOUT=10\n  while [[ ! 
-e /var/run/libvirt/libvirt-sock ]]; do\n    if [[ ${TIMEOUT} -gt 0 ]]; then\n      let TIMEOUT-=1\n      sleep 1\n    else\n      echo \"ERROR: libvirt did not start in time (socket missing)\"\n      exit 1\n    fi\n  done\n\n  function create_virsh_libvirt_secret {\n    sec_user=$1\n    sec_uuid=$2\n    sec_ceph_keyring=$3\n    cat > ${tmpsecret} <<EOF\n<secret ephemeral='no' private='no'>\n  <uuid>${sec_uuid}</uuid>\n  <usage type='ceph'>\n    <name>client.${sec_user}. secret</name>\n  </usage>\n</secret>\nEOF\n    virsh secret-define --file ${tmpsecret}\n    virsh secret-set-value --secret \"${sec_uuid}\" --base64 \"${sec_ceph_keyring}\"\n  }\n\n  if [ -z \"${CEPH_CINDER_KEYRING}\" ] && [ -n \"${CEPH_CINDER_USER}\" ] ; then\n    CEPH_CINDER_KEYRING=$(awk '/key/{print $3}' /etc/ceph/ceph.client.${CEPH_CINDER_USER}.keyring)\n  fi\n  if [ -n \"${CEPH_CINDER_USER}\" ] ; then\n    create_virsh_libvirt_secret ${CEPH_CINDER_USER} ${LIBVIRT_CEPH_CINDER_SECRET_UUID} ${CEPH_CINDER_KEYRING}\n  fi\n\n  if [ -n \"${LIBVIRT_EXTERNAL_CEPH_CINDER_SECRET_UUID}\" ] ; then\n    EXTERNAL_CEPH_CINDER_KEYRING=$(cat /tmp/external-ceph-client-keyring)\n    create_virsh_libvirt_secret ${EXTERNAL_CEPH_CINDER_USER} ${LIBVIRT_EXTERNAL_CEPH_CINDER_SECRET_UUID} ${EXTERNAL_CEPH_CINDER_KEYRING}\n  fi\n\n  cleanup\n\n  # stop libvirtd; we needed it up to create secrets\n  LIBVIRTD_PID=$(cat /var/run/libvirtd.pid)\n  kill $LIBVIRTD_PID\n  tail --pid=$LIBVIRTD_PID -f /dev/null\n\nfi\n\n# NOTE(vsaienko): changing CGROUP is required as restart of the pod will cause domains restarts\ncgexec -g ${CGROUPS%,}:/osh-libvirt systemd-run --scope --slice=system libvirtd --listen\n"
  },
  {
    "path": "libvirt/templates/configmap-apparmor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- dict \"envAll\" . \"component\" \"libvirt\" | include \"helm-toolkit.snippets.kubernetes_apparmor_configmap\" }}\n"
  },
  {
    "path": "libvirt/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"libvirt.configmap.bin\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ $configMapName }}\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  libvirt.sh: |\n{{ tuple \"bin/_libvirt.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- if eq .Values.conf.qemu.vnc_tls \"1\" }}\n  cert-init.sh: |\n{{ tpl .Values.conf.vencrypt.cert_init_sh . | indent 4 }}\n{{- end }}\n{{- if .Values.conf.ceph.enabled }}\n  ceph-keyring.sh: |\n{{ tuple \"bin/_ceph-keyring.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ceph-admin-keyring.sh: |\n{{ tuple \"bin/_ceph-admin-keyring.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.init_modules.script \"key\" \"libvirt-init-modules.sh\") | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.dynamic_options.script \"key\" \"init-dynamic-options.sh\") | indent 2 }}\n{{- if .Values.conf.hooks.enabled }}\n{{- range $k, $v := .Values.conf.hooks.scripts }}\n  {{ $k }}: |\n{{ tpl $v . 
| indent 4 }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- if .Values.manifests.configmap_bin }}\n{{- list \"libvirt-bin\" . | include \"libvirt.configmap.bin\" }}\n{{- end }}\n"
  },
  {
    "path": "libvirt/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"libvirt.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $configMapName }}\ntype: Opaque\ndata:\n  qemu.conf: {{ include \"libvirt.utils.to_libvirt_conf\" .Values.conf.qemu | b64enc }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- list \"libvirt-etc\" . | include \"libvirt.configmap.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "libvirt/templates/daemonset-libvirt.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"libvirtProbeTemplate\" }}\nexec:\n  command:\n    - bash\n    - -c\n    - /usr/bin/virsh connect\n{{- end }}\n\n{{- define \"libvirt.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 3 }}\n{{- $ssl_enabled := false }}\n{{- if eq $envAll.Values.conf.libvirt.listen_tls \"1\" }}\n{{- $ssl_enabled = true }}\n{{- end }}\n{{- with $envAll }}\n\n{{- $mounts_libvirt := .Values.pod.mounts.libvirt.libvirt }}\n{{- $mounts_libvirt_init := .Values.pod.mounts.libvirt.init_container }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: libvirt\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll $daemonset | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{- dict \"envAll\" $envAll \"podName\" \"libvirt-libvirt-default\" \"containerNames\" (list \"libvirt\") 
| include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"libvirt\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.agent.libvirt.node_selector_key }}: {{ .Values.labels.agent.libvirt.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.libvirt.enabled }}\n{{ tuple $envAll \"libvirt\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      hostNetwork: true\n      hostPID: true\n      hostIPC: true\n      dnsPolicy: {{ .Values.pod.dns_policy }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_libvirt_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n{{ dict \"envAll\" $envAll | include \"helm-toolkit.snippets.kubernetes_apparmor_loader_init_container\" | indent 8 }}\n{{- if .Values.conf.init_modules.enabled }}\n        - name: libvirt-init-modules\n{{ tuple $envAll \"libvirt\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"libvirt\" \"container\" \"libvirt_init_modules\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          terminationMessagePath: /var/log/termination-log\n          command:\n            - /tmp/libvirt-init-modules.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etc-modprobe-d\n              mountPath: /etc/modprobe.d_host\n            - name: host-rootfs\n              
mountPath: /mnt/host-rootfs\n              mountPropagation: HostToContainer\n              readOnly: true\n            - name: libvirt-bin\n              mountPath: /tmp/libvirt-init-modules.sh\n              subPath: libvirt-init-modules.sh\n              readOnly: true\n{{- end }}\n        - name: init-dynamic-options\n{{ tuple $envAll \"libvirt\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"libvirt\" \"container\" \"init_dynamic_options\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          terminationMessagePath: /var/log/termination-log\n          command:\n            - /tmp/init-dynamic-options.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: libvirt-bin\n              mountPath: /tmp/init-dynamic-options.sh\n              subPath: init-dynamic-options.sh\n              readOnly: true\n{{- if eq .Values.conf.qemu.vnc_tls \"1\" }}\n        - name: cert-init-vnc\n{{ tuple $envAll \"kubectl\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"libvirt\" \"container\" \"cert_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/cert-init.sh\n          env:\n            - name: TYPE\n              value: vnc\n            - name: ISSUER_KIND\n              value: {{ .Values.conf.vencrypt.issuer.kind }}\n            - name: ISSUER_NAME\n              value: {{ .Values.conf.vencrypt.issuer.name }}\n            - name: POD_UID\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.uid\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: POD_NAMESPACE\n              
valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: POD_IP\n              valueFrom:\n                fieldRef:\n                  fieldPath: status.podIP\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: libvirt-bin\n              mountPath: /tmp/cert-init.sh\n              subPath: cert-init.sh\n              readOnly: true\n{{- end }}\n{{- if .Values.conf.ceph.enabled }}\n        {{- if empty .Values.conf.ceph.cinder.keyring }}\n        - name: ceph-admin-keyring-placement\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"libvirt\" \"container\" \"ceph_admin_keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/ceph-admin-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: libvirt-bin\n              mountPath: /tmp/ceph-admin-keyring.sh\n              subPath: ceph-admin-keyring.sh\n              readOnly: true\n            {{- if empty .Values.conf.ceph.admin_keyring }}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{ end }}\n        {{ end }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"ceph_config_helper\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"libvirt\" \"container\" \"ceph_keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: CEPH_CINDER_USER\n              value: \"{{ .Values.conf.ceph.cinder.user }}\"\n            {{- if .Values.conf.ceph.cinder.keyring }}\n     
       - name: CEPH_CINDER_KEYRING\n              value: \"{{ .Values.conf.ceph.cinder.keyring }}\"\n            {{ end }}\n            - name: LIBVIRT_CEPH_CINDER_SECRET_UUID\n              value: \"{{ .Values.conf.ceph.cinder.secret_uuid }}\"\n          command:\n            - /tmp/ceph-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: libvirt-bin\n              mountPath: /tmp/ceph-keyring.sh\n              subPath: ceph-keyring.sh\n              readOnly: true\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf.template\n              subPath: ceph.conf\n              readOnly: true\n{{- end }}\n      containers:\n        - name: libvirt\n{{ tuple $envAll \"libvirt\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.libvirt | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"libvirt\" \"container\" \"libvirt\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n          {{- if .Values.conf.ceph.enabled }}\n            - name: CEPH_CINDER_USER\n              value: \"{{ .Values.conf.ceph.cinder.user }}\"\n            {{- if .Values.conf.ceph.cinder.keyring }}\n            - name: CEPH_CINDER_KEYRING\n              value: \"{{ .Values.conf.ceph.cinder.keyring }}\"\n            {{ end }}\n            - name: LIBVIRT_CEPH_CINDER_SECRET_UUID\n              value: \"{{ .Values.conf.ceph.cinder.secret_uuid }}\"\n          {{ end }}\n          {{- if .Values.conf.ceph.cinder.external_ceph.enabled }}\n            - name: EXTERNAL_CEPH_CINDER_USER\n              value: \"{{ .Values.conf.ceph.cinder.external_ceph.user }}\"\n            - name: LIBVIRT_EXTERNAL_CEPH_CINDER_SECRET_UUID\n              value: \"{{ 
.Values.conf.ceph.cinder.external_ceph.secret_uuid }}\"\n            {{ end }}\n{{ dict \"envAll\" . \"component\" \"libvirt\" \"container\" \"libvirt\" \"type\" \"readiness\" \"probeTemplate\" (include \"libvirtProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"libvirt\" \"container\" \"libvirt\" \"type\" \"liveness\" \"probeTemplate\" (include \"libvirtProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/libvirt.sh\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - bash\n                  - -c\n                  - |-\n                    kill $(cat /var/run/libvirtd.pid)\n          volumeMounts:\n            {{ dict \"enabled\" $ssl_enabled \"name\" \"ssl-client\" \"path\" \"/etc/pki/libvirt\" \"certs\" (tuple \"clientcert.pem\" \"clientkey.pem\" ) | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n            {{ dict \"enabled\" $ssl_enabled \"name\" \"ssl-server-cert\" \"path\" \"/etc/pki/libvirt\" \"certs\" (tuple \"servercert.pem\" ) | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n            {{ dict \"enabled\" $ssl_enabled \"name\" \"ssl-server-key\" \"path\" \"/etc/pki/libvirt/private\" \"certs\" (tuple \"serverkey.pem\" ) | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n            {{ dict \"enabled\" $ssl_enabled \"name\" \"ssl-ca-cert\" \"path\" \"/etc/pki/CA\" \"certs\" (tuple \"cacert.pem\" ) | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: libvirt-bin\n              mountPath: /tmp/libvirt.sh\n              subPath: libvirt.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /etc/libvirt/libvirtd.conf\n              subPath: 
libvirtd.conf\n              readOnly: true\n            - name: libvirt-etc\n              mountPath: /etc/libvirt/qemu.conf\n              subPath: qemu.conf\n              readOnly: true\n            - name: etc-libvirt-qemu\n              mountPath: /etc/libvirt/qemu\n            - mountPath: /lib/modules\n              name: libmodules\n              readOnly: true\n            - name: var-lib-libvirt\n              mountPath: /var/lib/libvirt\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: Bidirectional\n              {{- end }}\n            - name: var-lib-nova\n              mountPath: /var/lib/nova\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: Bidirectional\n              {{- end }}\n            - name: run\n              mountPath: /run\n            - name: dev\n              mountPath: /dev\n            - name: cgroup\n              mountPath: /sys/fs/cgroup\n            - name: logs\n              mountPath: /var/log/libvirt\n            - name: machine-id\n              mountPath: /etc/machine-id\n              readOnly: true\n            {{- if .Values.conf.ceph.enabled }}\n            - name: etcceph\n              mountPath: /etc/ceph\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: Bidirectional\n              {{- end }}\n            {{- if empty .Values.conf.ceph.cinder.keyring }}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            {{- if .Values.conf.ceph.cinder.external_ceph.enabled }}\n            - name: external-ceph-keyring\n              mountPath: /tmp/external-ceph-client-keyring\n       
       subPath: key\n              readOnly: true\n            {{- end }}\n            {{- if .Values.conf.hooks.enabled }}\n            {{- range $k, $v := .Values.conf.hooks.scripts }}\n            - name: libvirt-bin\n              mountPath: /etc/libvirt/hooks/{{ $k }}\n              subPath: {{ $k }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n{{ if $mounts_libvirt.volumeMounts }}{{ toYaml $mounts_libvirt.volumeMounts | indent 12 }}{{ end }}\n        {{- with .Values.libvirt.extraContainers }}\n        {{- tpl (toYaml .) $envAll | nindent 8 }}\n        {{- end }}\n      volumes:\n        {{ dict \"enabled\" $ssl_enabled \"secretName\" $envAll.Values.secrets.tls.client \"name\" \"ssl-client\" \"path\" \"/etc/pki/libvirt\" \"certs\" (tuple \"clientcert.pem\" \"clientkey.pem\" ) | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n        {{ dict \"enabled\" $ssl_enabled \"secretName\" $envAll.Values.secrets.tls.server \"name\" \"ssl-server-cert\" \"path\" \"/etc/pki/libvirt\" \"certs\" (tuple \"servercert.pem\" ) | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n        {{ dict \"enabled\" $ssl_enabled \"secretName\" $envAll.Values.secrets.tls.server \"name\" \"ssl-server-key\" \"path\" \"/etc/pki/libvirt/private\" \"certs\" (tuple \"serverkey.pem\" ) | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n        {{ dict \"enabled\" $ssl_enabled \"secretName\" $envAll.Values.secrets.tls.server \"name\" \"ssl-ca-cert\" \"path\" \"/etc/pki/CA\" \"certs\" (tuple \"cacert.pem\" ) | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n        - name: pod-tmp\n          emptyDir: {}\n        - name: libvirt-bin\n          configMap:\n            name: libvirt-bin\n            defaultMode: 0555\n        - name: libvirt-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        {{- if .Values.conf.ceph.enabled }}\n        - name: etcceph\n          
hostPath:\n            path: /var/lib/openstack-helm/compute/libvirt\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n            defaultMode: 0444\n        {{- if empty .Values.conf.ceph.cinder.keyring }}\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.ceph_client.user_secret_name }}\n        {{ end }}\n        {{ end }}\n        {{- if .Values.conf.ceph.cinder.external_ceph.enabled }}\n        - name: external-ceph-keyring\n          secret:\n            secretName: {{ .Values.conf.ceph.cinder.external_ceph.user_secret_name }}\n        {{ end }}\n        - name: libmodules\n          hostPath:\n            path: /lib/modules\n        - name: var-lib-libvirt\n          hostPath:\n            path: /var/lib/libvirt\n        - name: var-lib-nova\n          hostPath:\n            path: /var/lib/nova\n        - name: run\n          hostPath:\n            path: /run\n        - name: dev\n          hostPath:\n            path: /dev\n        - name: logs\n          hostPath:\n            path: /var/log/libvirt\n        - name: cgroup\n          hostPath:\n            path: /sys/fs/cgroup\n        - name: machine-id\n          hostPath:\n            path: /etc/machine-id\n        - name: etc-libvirt-qemu\n          hostPath:\n            path: /etc/libvirt/qemu\n        - name: etc-modprobe-d\n          hostPath:\n            path: /etc/modprobe.d\n        - name: host-rootfs\n          hostPath:\n            path: /\n            type: Directory\n        - name: pod-shared\n          emptyDir: {}\n{{ dict \"envAll\" $envAll \"component\" \"libvirt\" \"requireSys\" true | include \"helm-toolkit.snippets.kubernetes_apparmor_volumes\" | indent 8 }}\n{{ if $mounts_libvirt.volumes }}{{ toYaml $mounts_libvirt.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_libvirt }}\n\n{{- $envAll := . 
}}\n{{- $daemonset := \"libvirt\" }}\n{{- $configMapName := \"libvirt-etc\" }}\n{{- $serviceAccountName := \"libvirt\" }}\n\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"libvirt\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $configmap_yaml := \"libvirt.configmap.etc\" }}\n\n{{/* Prefer using .Values.overrides rather than .Values.conf.overrides */}}\n{{- list $daemonset \"libvirt.daemonset\" $serviceAccountName $configmap_yaml $configMapName \"libvirt.configmap.bin\" \"libvirt-bin\" . | include \"helm-toolkit.utils.daemonset_overrides_root\" }}\n{{- end }}\n"
  },
  {
    "path": "libvirt/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "libvirt/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"libvirt\" -}}\n{{- if .Values.pod.tolerations.libvirt.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "libvirt/templates/network-policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"libvirt\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "libvirt/templates/pod-monitor.yaml",
    "content": "{{- if .Values.manifests.podmonitor -}}\napiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: libvirt-exporter\nspec:\n  namespaceSelector:\n    matchNames:\n    - {{ .Release.Namespace }}\n  podMetricsEndpoints:\n  - interval: 10s\n    path: /metrics\n    port: metrics\n    scheme: http\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: libvirt\n{{- end -}}\n"
  },
  {
    "path": "libvirt/templates/role-cert-manager.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.role_cert_manager }}\n{{- $serviceAccountName := \"libvirt\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ .Release.Name }}-cert-manager\n  namespace: {{ .Release.Namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ .Release.Name }}-cert-manager\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ .Release.Name }}-cert-manager\n  namespace: {{ .Release.Namespace }}\nrules:\n  - apiGroups:\n      - cert-manager.io\n    verbs:\n      - get\n      - list\n      - create\n      - watch\n    resources:\n      - certificates\n  - apiGroups:\n      - \"\"\n    verbs:\n      - get\n      - patch\n    resources:\n      - secrets\n{{- end -}}"
  },
  {
    "path": "libvirt/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "libvirt/templates/utils/_to_libvirt_conf.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nabstract: |\n  Builds a libvirt compatible config file.\nvalues: |\n  conf:\n    libvirt:\n      log_level: 3\n      cgroup_controllers:\n        - cpu\n        - cpuacct\nusage: |\n  {{ include \"libvirt.utils.to_libvirt_conf\" .Values.conf.libvirt }}\nreturn: |\n  cgroup_controllers = [ \"cpu\", \"cpuacct\" ]\n  log_level = 3\n*/}}\n\n{{- define \"libvirt.utils._to_libvirt_conf.list_to_string\" -}}\n{{- $local := dict \"first\" true -}}\n{{- range $k, $v := . -}}{{- if not $local.first -}}, {{ end -}}{{- $v | quote -}}{{- $_ := set $local \"first\" false -}}{{- end -}}\n{{- end -}}\n\n{{- define \"libvirt.utils.to_libvirt_conf\" -}}\n{{- range $key, $value :=  . -}}\n{{- if kindIs \"slice\" $value }}\n{{ $key }} = [ {{ include \"libvirt.utils._to_libvirt_conf.list_to_string\" $value }} ]\n{{- else if kindIs \"string\" $value }}\n{{- if regexMatch \"^[0-9]+$\" $value }}\n{{ $key }} = {{ $value }}\n{{- else }}\n{{ $key }} = {{ $value | quote }}\n{{- end }}\n{{- else }}\n{{ $key }} = {{ $value }}\n{{- end }}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "libvirt/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for libvirt.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nlabels:\n  agent:\n    libvirt:\n      node_selector_key: openstack-compute-node\n      node_selector_value: enabled\n\nimages:\n  tags:\n    libvirt: quay.io/airshipit/libvirt:2025.1-ubuntu_noble\n    ceph_config_helper: 'quay.io/airshipit/ceph-config-helper:ubuntu_jammy_20.2.1-1-20260407'\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n    kubectl: docker.io/bitnami/kubectl:latest\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nnetwork:\n  # provide what type of network wiring will be used\n  # possible options: openvswitch, linuxbridge, sriov\n  backend:\n    - openvswitch\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      libvirt:\n        username: libvirt\n        
password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n\nnetwork_policy:\n  libvirt:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nceph_client:\n  configmap: ceph-etc\n  user_secret_name: pvc-ceph-client-key\n\nconf:\n  ceph:\n    enabled: true\n    admin_keyring: null\n    cinder:\n      user: \"cinder\"\n      keyring: null\n      secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337\n      # Cinder Ceph backend that is not configured by the k8s cluster\n      external_ceph:\n        enabled: false\n        user: null\n        secret_uuid: null\n        user_secret_name: null\n  libvirt:\n    listen_tcp: \"1\"\n    listen_tls: \"0\"\n    auth_tcp: \"none\"\n    ca_file: \"/etc/pki/CA/cacert.pem\"\n    cert_file: \"/etc/pki/libvirt/servercert.pem\"\n    key_file: \"/etc/pki/libvirt/private/serverkey.pem\"\n    auth_unix_rw: \"none\"\n    listen_addr: \"${LISTEN_IP_ADDRESS}\"\n    log_level: \"3\"\n    log_outputs: \"1:file:/var/log/libvirt/libvirtd.log\"\n  # Modifies the config in which value is specified as the name of a variable\n  # that is computed in the script.\n  dynamic_options:\n    libvirt:\n      listen_interface: null\n      listen_address: 127.0.0.1\n    script: |\n      #!/bin/bash\n      set -ex\n\n      LIBVIRT_CONF_PATH=/tmp/pod-shared/libvirtd.conf\n\n      {{- if .Values.conf.dynamic_options.libvirt.listen_interface }}\n\n      LISTEN_INTERFACE=\"{{ .Values.conf.dynamic_options.libvirt.listen_interface }}\"\n      LISTEN_IP_ADDRESS=$(ip address show $LISTEN_INTERFACE | grep 'inet ' | awk '{print $2}' | awk -F \"/\" '{print $1}')\n      {{- else if .Values.conf.dynamic_options.libvirt.listen_address }}\n      LISTEN_IP_ADDRESS={{ .Values.conf.dynamic_options.libvirt.listen_address }}\n      {{- end }}\n\n      if [[ -z $LISTEN_IP_ADDRESS ]]; then\n          echo \"LISTEN_IP_ADDRESS is not set.\"\n          exit 1\n      fi\n\n      tee > 
${LIBVIRT_CONF_PATH} << EOF\n      {{ include \"libvirt.utils.to_libvirt_conf\" .Values.conf.libvirt }}\n      EOF\n  qemu:\n    vnc_tls: \"0\"\n    vnc_tls_x509_verify: \"0\"\n    stdio_handler: \"file\"\n    user: \"nova\"\n    group: \"kvm\"\n  kubernetes:\n    cgroup: \"kubepods.slice\"\n    # List of cgroup controller we want to use when breaking out of\n    # Kubernetes defined groups\n    cgroup_controllers:\n      - blkio\n      - cpu\n      - devices\n      - freezer\n      - hugetlb\n      - memory\n      - net_cls\n      - perf_event\n      - rdma\n      - misc\n      - pids\n  init_modules:\n    enabled: false\n    script: |\n      #!/bin/bash\n\n      set -ex\n      export HOME=/tmp\n      KVM_QEMU_CONF_HOST=\"/etc/modprobe.d_host/qemu-system-x86.conf\"\n\n      if [[ ! -f \"${KVM_QEMU_CONF_HOST}\" ]]; then\n        if grep vmx /proc/cpuinfo; then\n          cat << EOF > ${KVM_QEMU_CONF_HOST}\n      options kvm_intel nested=1\n      options kvm_intel enable_apicv=1\n      options kvm_intel ept=1\n      EOF\n          modprobe -r kvm_intel || true\n          modprobe kvm_intel nested=1\n        elif grep svm /proc/cpuinfo; then\n          cat << EOF > ${KVM_QEMU_CONF_HOST}\n      options kvm_amd nested=1\n      EOF\n          modprobe -r kvm_amd || true\n          modprobe kvm_amd nested=1\n        else\n          echo \"Nested virtualization is not supported\"\n        fi\n      fi\n  vencrypt:\n    # Issuer to use for the vencrypt certs.\n    issuer:\n      kind: ClusterIssuer\n      name: ca-clusterissuer\n    # Script is included here (vs in bin/) to allow overriding, in the case that\n    # communication happens over an IP other than the pod IP for some reason.\n    cert_init_sh: |\n      #!/bin/bash\n      set -x\n\n      HOSTNAME_FQDN=$(hostname --fqdn)\n\n      # Script to create certs for each libvirt pod based on pod IP (by default).\n      cat <<EOF | kubectl apply -f -\n      apiVersion: cert-manager.io/v1\n      kind: Certificate\n      
metadata:\n        name: ${POD_NAME}-${TYPE}\n        namespace: ${POD_NAMESPACE}\n        ownerReferences:\n          - apiVersion: v1\n            kind: Pod\n            name: ${POD_NAME}\n            uid: ${POD_UID}\n      spec:\n        secretName: ${POD_NAME}-${TYPE}\n        commonName: ${POD_IP}\n        usages:\n        - client auth\n        - server auth\n        dnsNames:\n        - ${HOSTNAME}\n        - ${HOSTNAME_FQDN}\n        ipAddresses:\n        - ${POD_IP}\n        issuerRef:\n          kind: ${ISSUER_KIND}\n          name: ${ISSUER_NAME}\n      EOF\n\n      kubectl -n ${POD_NAMESPACE} wait --for=condition=Ready --timeout=300s \\\n        certificate/${POD_NAME}-${TYPE}\n\n      # NOTE(mnaser): cert-manager does not clean-up the secrets when the certificate\n      #               is deleted, so we should add an owner reference to the secret\n      #               to ensure that it is cleaned up when the pod is deleted.\n      kubectl -n ${POD_NAMESPACE} patch secret ${POD_NAME}-${TYPE} \\\n        --type=json -p='[{\"op\": \"add\", \"path\": \"/metadata/ownerReferences\", \"value\": [{\"apiVersion\": \"v1\", \"kind\": \"Pod\", \"name\": \"'${POD_NAME}'\", \"uid\": \"'${POD_UID}'\"}]}]'\n\n      kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.tls\\.crt}' | base64 -d > /tmp/${TYPE}.crt\n      kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.tls\\.key}' | base64 -d > /tmp/${TYPE}.key\n      kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.ca\\.crt}' | base64 -d > /tmp/${TYPE}-ca.crt\n  hooks:\n    # Libvirt hook scripts, that are placed in /etc/libvirt/hooks\n    enabled: false\n    scripts:\n      # daemon:\n      # qemu:\n      # lxc:\n      # libxl:\n      # bhyve:\n      # network:\n\npod:\n  probes:\n    libvirt:\n      libvirt:\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 
60\n            timeoutSeconds: 5\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 15\n            periodSeconds: 60\n            timeoutSeconds: 5\n  security_context:\n    libvirt:\n      pod:\n        runAsUser: 0\n      container:\n        ceph_admin_keyring_placement:\n          readOnlyRootFilesystem: false\n        ceph_keyring_placement:\n          readOnlyRootFilesystem: false\n        libvirt:\n          privileged: true\n          readOnlyRootFilesystem: false\n        libvirt_init_modules:\n          readOnlyRootFilesystem: true\n          privileged: true\n          capabilities:\n            drop:\n              - ALL\n        init_dynamic_options:\n          runAsUser: 65534\n          runAsNonRoot: true\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n          capabilities:\n            drop:\n              - ALL\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    libvirt:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  dns_policy: \"ClusterFirstWithHostNet\"\n  mounts:\n    libvirt:\n      init_container: null\n      libvirt:\n  lifecycle:\n    upgrades:\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        libvirt:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n  resources:\n    enabled: false\n    libvirt:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: 
\"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - libvirt-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n    targeted:\n      ovn:\n        libvirt:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: ovn\n                component: ovn-controller\n      openvswitch:\n        libvirt:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: neutron\n                component: neutron-ovs-agent\n      linuxbridge:\n        libvirt:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: neutron\n                component: neutron-lb-agent\n      sriov:\n        libvirt:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: neutron\n                component: neutron-sriov-agent\n  static:\n    libvirt:\n      services: null\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nlibvirt:\n  extraContainers: []\n\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  daemonset_libvirt: true\n  job_image_repo_sync: true\n  network_policy: false\n  role_cert_manager: false\n  secret_registry: true\n\nsecrets:\n  oci_image_registry:\n    libvirt: libvirt-oci-image-registry-key\n  tls:\n    server: libvirt-tls-server\n    client: libvirt-tls-client\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           
objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "local-storage/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Local Storage\nname: local-storage\nversion: 2025.2.0\nhome: https://kubernetes.io/docs/concepts/storage/volumes/#local\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "local-storage/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "local-storage/templates/persistent-volumes.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.persistent_volumes }}\n{{- $envAll := . }}\n{{- range .Values.conf.persistent_volumes }}\n---\napiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: {{ .name }}\n  labels:\n{{ tuple $envAll \"local-storage\" $envAll.Release.Name | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  capacity:\n    storage: {{ .storage_capacity }}\n  accessModes: {{ .access_modes }}\n  persistentVolumeReclaimPolicy: {{ .reclaim_policy }}\n  storageClassName: {{ $envAll.Release.Name }}\n  local:\n    path: {{ .local_path }}\n  nodeAffinity:\n    required:\n      nodeSelectorTerms:\n      - matchExpressions:\n        - key: {{ $envAll.Values.labels.node_affinity.node_selector_key }}\n          operator: In\n          values:\n            - {{ $envAll.Values.labels.node_affinity.node_selector_value }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "local-storage/templates/storage-class.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.storage_class }}\n{{- $envAll := . }}\n---\napiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: {{ .Release.Name }}\n  labels:\n{{ tuple $envAll \"local-storage\" $envAll.Release.Name | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nprovisioner: kubernetes.io/no-provisioner\nvolumeBindingMode: WaitForFirstConsumer\n{{- end }}\n"
  },
  {
    "path": "local-storage/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nlabels:\n  node_affinity:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nconf:\n  persistent_volumes:\n  # For each mount path, one PV should be created.\n  # If two mount paths for local storage are available on two nodes,\n  # then two PVs details should be defined. Example:\n  # - name: local-pv-1 (name of the Persistent Volume 1)\n  #   reclaim_policy: Retain (Reclaim Policy for the PV local-pv-1)\n  #   storage_capacity: \"100Gi\" (Storage capacity of the PV local-pv-1)\n  #   access_modes: [ \"ReadWriteOnce\" ] (Access mode for the PV local-pv-1)\n  #   local_path: /mnt/disk/vol1 (Mount path of the local disk, local-pv-1 will be created on)\n  # - name: local-pv-2 (name of the Persistent Volume 2)\n  #   reclaim_policy: Retain (Reclaim Policy for the PV local-pv-2)\n  #   storage_capacity: \"100Gi\" (Storage capacity of the PV local-pv-2)\n  #   access_modes: [ \"ReadWriteOnce\" ] (Access mode for the PV local-pv-2)\n  #   local_path: /mnt/disk/vol2 (Mount path of the local disk, local-pv-2 will be created on)\n  # Similarly if three nodes each have disk mount path /var/lib/kubernetes\n  # which will be acting as local storage for each node, then Persistentvolumes\n  # should be updated with three entries.\n\nmanifests:\n  storage_class: true\n  persistent_volumes: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of 
custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "local-volume-provisioner/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm local-volume-provisioner\nname: local-volume-provisioner\nversion: 2025.2.0\nhome: https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner\nsources:\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "local-volume-provisioner/templates/bin/_fakemount.py.tpl",
    "content": "#!/usr/bin/env python3\n#\n# Copyright 2019 Mirantis, Inc.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\"\"\"Fakemount python module\nThe module is aimed to create fake mountpoints (--bind).\nExample:\n  python3  fakemount --config-file '/root/mymount.yml'\nAttributes:\n  config-file - file path to config file that contains fake mounts.\n\"\"\"\n__version__ = \"1.0\"\nimport argparse\nimport logging\nimport os\nimport re\nimport subprocess\nimport sys\nfrom collections import defaultdict\nimport yaml\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\nLOG = logging.getLogger(__name__)\nMOUNT_BIN = \"/bin/mount\"\n###Fork https://github.com/b10011/pyfstab/ #####################################\n#  Latest commit 828540d\nclass InvalidEntry(Exception):\n    \"\"\"\n    Raised when a string cannot be generated because the Entry is invalid.\n    \"\"\"\nclass InvalidFstabLine(Exception):\n    \"\"\"\n    Raised when a line is invalid in fstab. 
This doesn't just mean that the\n    Entry will be invalid but also that the system can not process the fstab\n    file fully either.\n    \"\"\"\nclass Entry:\n    \"\"\"\n    Handles parsing and formatting fstab line entries.\n    :var device:\n        (str or None) -\n        Fstab device (1st parameter in the fstab entry)\n    :var dir:\n        (str or None) -\n        Fstab device (2nd parameter in the fstab entry)\n    :var type:\n        (str or None) -\n        Fstab device (3rd parameter in the fstab entry)\n    :var options:\n        (str or None) -\n        Fstab device (4th parameter in the fstab entry)\n    :var dump:\n        (int or None) -\n        Fstab device (5th parameter in the fstab entry)\n    :var fsck:\n        (int or None) -\n        Fstab device (6th parameter in the fstab entry)\n    :var valid:\n        (bool) -\n        Whether the Entry is valid or not. Can be checked with \"if entry:\".\n    \"\"\"\n    def __init__(\n            self,\n            _device=None,\n            _dir=None,\n            _type=None,\n            _options=None,\n            _dump=None,\n            _fsck=None,\n    ):\n        \"\"\"\n        :param _device: Fstab device (1st parameter in the fstab entry)\n        :type _device: str\n        :param _dir: Fstab device (2nd parameter in the fstab entry)\n        :type _dir: str\n        :param _type: Fstab device (3rd parameter in the fstab entry)\n        :type _type: str\n        :param _options: Fstab device (4th parameter in the fstab entry)\n        :type _options: str\n        :param _dump: Fstab device (5th parameter in the fstab entry)\n        :type _dump: int\n        :param _fsck: Fstab device (6th parameter in the fstab entry)\n        :type _fsck: int\n        \"\"\"\n        self.device = _device\n        self.dir = _dir\n        self.type = _type\n        self.options = _options\n        self.dump = _dump\n        self.fsck = _fsck\n        self.valid = True\n        self.valid &= self.device 
is not None\n        self.valid &= self.dir is not None\n        self.valid &= self.type is not None\n        self.valid &= self.options is not None\n        self.valid &= self.dump is not None\n        self.valid &= self.fsck is not None\n    def read_string(self, line):\n        \"\"\"\n        Parses an entry from a string\n        :param line: Fstab entry line.\n        :type line: str\n        :return: self\n        :rtype: Entry\n        :raises InvalidEntry: If the data in the string cannot be parsed.\n        \"\"\"\n        line = line.strip()\n        if line and not line[0] == \"#\":\n            parts = re.split(r\"\\s+\", line)\n            if len(parts) == 6:\n                [_device, _dir, _type, _options, _dump, _fsck] = parts\n                _dump = int(_dump)\n                _fsck = int(_fsck)\n                self.device = _device\n                self.dir = _dir\n                self.type = _type\n                self.options = _options\n                self.dump = _dump\n                self.fsck = _fsck\n                self.valid = True\n                return self\n            else:\n                raise InvalidFstabLine()\n        self.device = None\n        self.dir = None\n        self.type = None\n        self.options = None\n        self.dump = None\n        self.fsck = None\n        self.valid = False\n        raise InvalidEntry(\"Entry cannot be parsed\")\n    def write_string(self):\n        \"\"\"\n        Formats the Entry into fstab entry line.\n        :return: Fstab entry line.\n        :rtype: str\n        :raises InvalidEntry:\n            A string cannot be generated because the entry is invalid.\n        \"\"\"\n        if self:\n            return \"{} {} {} {} {} {}\".format(\n                self.device,\n                self.dir,\n                self.type,\n                self.options,\n                self.dump,\n                self.fsck,\n            )\n        else:\n            raise InvalidEntry(\"Entry 
cannot be formatted\")\n    def __bool__(self):\n        return self.valid\n    def __str__(self):\n        return self.write_string()\n    def __repr__(self):\n        try:\n            return \"<Entry {}>\".format(str(self))\n        except InvalidEntry:\n            return \"<Entry Invalid>\"\nclass Fstab:\n    \"\"\"\n    Handles reading, parsing, formatting and writing of fstab files.\n    :var entries:\n        (list[Entry]) -\n        List of entries.\n        When writing to a file, entries are listed from this list.\n    :var entries_by_device:\n        (dict[str, list[Entry]]) -\n        Fstab entries by device.\n    :var entry_by_dir:\n        (dict[str, Entry]) -\n        Fstab entry by directory.\n    :var entries_by_type:\n        (dict[str, list[Entry]]) -\n        Fstab entries by type.\n    \"\"\"\n    def __init__(self):\n        self.entries = []\n        # A single device can have multiple mountpoints\n        self.entries_by_device = defaultdict(list)\n        # If multiple devices have same mountpoint, only the last entry in the\n        # fstab file is taken into consideration\n        self.entry_by_dir = dict()\n        # And the most obvious one, many entries can have mountpoints of same\n        # type\n        self.entries_by_type = defaultdict(list)\n    def read_string(self, data, only_valid=False):\n        \"\"\"\n        Parses entries from a data string\n        :param data: Contents of the fstab file\n        :type data: str\n        :param only_valid:\n            Skip the entries that do not actually mount. 
For example, if device\n            A is mounted to directory X and later device B is mounted to\n            directory X, the A mount to X is undone by the system.\n        :type only_valid: bool\n        :return: self\n        :rtype: Fstab\n        \"\"\"\n        for line in reversed(data.splitlines()):\n            try:\n                entry = Entry().read_string(line)\n                if entry and (\n                        not only_valid or entry.dir not in self.entry_by_dir\n                ):\n                    self.entries.insert(0, entry)\n                    self.entries_by_device[entry.device].insert(0, entry)\n                    self.entry_by_dir[entry.dir] = entry\n                    self.entries_by_type[entry.type].insert(0, entry)\n            except InvalidEntry:\n                pass\n        return self\n    def write_string(self):\n        \"\"\"\n        Formats entries into a string.\n        :return: Formatted fstab file.\n        :rtype: str\n        :raises InvalidEntry:\n            A string cannot be generated because one of the entries is invalid.\n        \"\"\"\n        return \"\\n\".join(str(entry) for entry in self.entries)\n    def read_file(self, handle, only_valid=False):\n        \"\"\"\n        Parses entries from a file\n        :param handle: File handle\n        :type handle: file\n        :param only_valid:\n            Skip the entries that do not actually mount. 
For example, if device\n            A is mounted to directory X and later device B is mounted to\n            directory X, the A mount to X is undone by the system.\n        :type only_valid: bool\n        :return: self\n        :rtype: Fstab\n        \"\"\"\n        self.read_string(handle.read(), only_valid)\n        return self\n    def write_file(self, handle):\n        \"\"\"\n        Parses entries in data string\n        :param path: File handle\n        :type path: file\n        :return: self\n        :rtype: Fstab\n        \"\"\"\n        handle.write(str(self))\n        return self\n    def __bool__(self):\n        return len(self.entries) > 0\n    def __str__(self):\n        return self.write_string()\n    def __repr__(self):\n        res = \"<Fstab [{} entries]\".format(len(self.entries))\n        if self.entries:\n            res += \"\\n\"\n            for entry in self.entries:\n                res += \"  {}\\n\".format(entry)\n        res += \">\"\n        return res\n###End Fork https://github.com/b10011/pyfstab/ #################################\ndef fstab_bindmount(src, mountpoint, fstab_path=\"/mnt/host/fstab\", opts=None):\n    if opts is None:\n        opts = [\"bind\"]\n    mountpoint = os.path.normpath(mountpoint.strip())\n    with open(fstab_path, \"r\") as f:\n        fstab = Fstab().read_file(f)\n    if mountpoint in fstab.entry_by_dir:\n        LOG.info(f'Mount point {mountpoint} already defined in {fstab_path}')\n        return\n    fstab.entries.append(Entry(src, mountpoint, \"none\", \",\".join(opts), 0, 0))\n    str_fstab = str(fstab)\n    LOG.info(f'Attempt to overwrite file:{fstab_path}, with data:\\n'\n             f'{str_fstab}')\n    with open(fstab_path, \"w\") as f:\n        f.write(str_fstab)\ndef get_volumes(mount_point, i):\n    vol_template = \"vol%d%%d\" % i\n    volumes = mount_point.get(\"mounts\")\n    if volumes is not None:\n        return volumes\n    return [vol_template % vol_number for vol_number in\n            
range(mount_point[\"volPerNode\"])]\ndef ensure_directories_exists(storage_class):\n    target_root = storage_class.get(\"mountDir\", storage_class[\"hostDir\"])\n    for i, bind_mount in enumerate(storage_class[\"bindMounts\"]):\n        for vol_name in get_volumes(bind_mount, i):\n            source = os.path.normpath(f\"{bind_mount['srcRoot']}/{vol_name}\")\n            target = os.path.normpath(f\"{target_root}/{vol_name}\")\n            os.makedirs(target, exist_ok=True)\n            os.makedirs(source, exist_ok=True)\ndef is_mount(directory):\n    # Do not use os.path.ismount due to bug\n    # https://bugs.python.org/issue29707\n    directory = os.path.normpath(directory.strip())\n    with open(\"/proc/mounts\") as f:\n        for line in f.readlines():\n            if line.split(\" \")[1] == directory:\n                return True\ndef mount_directories(storage_class):\n    failed_mounts = []\n    target_root = storage_class.get(\"mountDir\", storage_class[\"hostDir\"])\n    additional_opts = storage_class.get(\"additionalMountOptions\", [])\n    opts = [\"bind\"] + additional_opts\n    for i, bind_mount in enumerate(storage_class[\"bindMounts\"]):\n        for vol_name in get_volumes(bind_mount, i):\n            source = os.path.normpath(f\"{bind_mount['srcRoot']}/{vol_name}\")\n            target = os.path.normpath(f\"{target_root}/{vol_name}\")\n            LOG.info(f\"Trying to mount {source} to {target}\")\n            if is_mount(target):\n                LOG.info(\n                    f\"The directory {target} already mounted, skipping it...\")\n            else:\n                cmd = [MOUNT_BIN, \"-o\", \",\".join(opts), source, target]\n                LOG.info(f\"Running {cmd}\")\n                obj = None\n                try:\n                    obj = subprocess.run(\n                        cmd,\n                        stdout=subprocess.PIPE,\n                        stderr=subprocess.PIPE,\n                    )\n                    
obj.check_returncode()\n                except Exception as e:\n                    LOG.exception(\n                        f\"Failed to mount {source} {target}\\n\"\n                        f\"stdout: {obj.stdout}\\n\"\n                        f\"stderr: {obj.stderr}\"\n                    )\n                    failed_mounts.append((source, target))\n                else:\n                    LOG.info(f\"Successfully mount {source} {target}\")\n            fstab_bindmount(source, target, opts=opts)\n    if failed_mounts:\n        raise Exception(f\"Failed to mount some directories: {failed_mounts}\")\ndef main():\n    parser = argparse.ArgumentParser(\n        description=\"Create fake mountpoints with specified directories.\"\n    )\n    group = parser.add_mutually_exclusive_group(required=True)\n    group.add_argument(\n        \"--config-file\", help=\"Path to file with image layout\",\n    )\n    parser.add_argument(\n        \"--create-only\",\n        help=\"Ensure target directories exist.\",\n        dest=\"create_only\",\n        action=\"store_true\",\n    )\n    parser.set_defaults(create_only=False)\n    args = parser.parse_args()\n    with open(args.config_file) as f:\n        data = yaml.safe_load(f)\n    if data is None:\n        LOG.exception(\"Invalid data supplied from the config file.\")\n        raise Exception\n    classes_data = data.get(\"classes\", [])\n    if isinstance(classes_data, list):\n        for storage_class in classes_data:\n            ensure_directories_exists(storage_class)\n        if not args.create_only:\n            for storage_class in classes_data:\n                mount_directories(storage_class)\nif __name__ == \"__main__\":\n    try:\n        main()\n    except Exception as e:\n        LOG.exception(\"Can't create volume mounts.\")\n        sys.exit(1)\n"
  },
  {
    "path": "local-volume-provisioner/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"lvp.configmap.bin\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ $configMapName }}\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  fakemount.py: |\n{{ tuple \"bin/_fakemount.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  storageClassMap: |\n    {{- range $classConfig := $envAll.Values.conf.fake_mounts.classes }}\n    {{ $classConfig.name }}:\n      hostDir: {{ $classConfig.hostDir }}\n      mountDir: {{ $classConfig.mountDir | default $classConfig.hostDir }}\n      {{- if $classConfig.blockCleanerCommand }}\n      blockCleanerCommand:\n      {{- range $val := $classConfig.blockCleanerCommand }}\n        - {{ $val | quote }}\n      {{- end}}\n      {{- end }}\n      {{- if $classConfig.volumeMode }}\n      volumeMode: {{ $classConfig.volumeMode }}\n      {{- end }}\n      {{- if $classConfig.fsType }}\n      fsType: {{ $classConfig.fsType }}\n      {{- end }}\n      {{- if $classConfig.namePattern }}\n      namePattern: {{ $classConfig.namePattern | quote }}\n      {{- end }}\n    {{- end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- list \"local-volume-provisioner-bin\" . | include \"lvp.configmap.bin\" }}\n{{- end }}\n"
  },
  {
    "path": "local-volume-provisioner/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"lvp.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $configMapName }}\ntype: Opaque\ndata:\n  fake_mounts.conf: {{ $envAll.Values.conf.fake_mounts | toJson | b64enc }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- list \"local-volume-provisioner-etc\" . | include \"lvp.configmap.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "local-volume-provisioner/templates/daemonset-lvp.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"lvp.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 3 }}\n\n{{- with $envAll }}\n\n{{- $mounts_lvp := $envAll.Values.pod.mounts.local_volume_provisioner.lvp }}\n{{- $mounts_lvp_init := $envAll.Values.pod.mounts.local_volume_provisioner.init_container }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: local-volume-provisioner\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll $daemonset | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{- dict \"envAll\" $envAll \"podName\" \"local-volume-provisioner\" \"containerNames\" (list \"lvp\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: 
{{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"local-volume-provisioner\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ $envAll.Values.labels.local_volume_provisioner.node_selector_key }}: {{ $envAll.Values.labels.local_volume_provisioner.node_selector_value }}\n      initContainers:\n        - name: init-mounts\n{{ tuple $envAll \"local_volume_provisioner_mounts\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"local_volume_provisioner\" \"container\" \"init_mounts\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          terminationMessagePath: /var/log/termination-log\n          env:\n          - name: POD_NAME\n            valueFrom:\n              fieldRef:\n                apiVersion: v1\n                fieldPath: metadata.name\n          - name: NAMESPACE\n            valueFrom:\n              fieldRef:\n                apiVersion: v1\n                fieldPath: metadata.namespace\n          - name: PATH\n            value: /var/lib/openstack/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/\n          command:\n            - /tmp/fakemount.py\n            - --config-file\n            - /etc/provisioner/fake_mounts.conf\n          volumeMounts:\n            - name: fstab\n              mountPath: /mnt/host/fstab\n            - name: local-volume-provisioner-etc\n              mountPath: /etc/provisioner/fake_mounts.conf\n              subPath: fake_mounts.conf\n              readOnly: true\n            - name: local-volume-provisioner-bin\n              mountPath: /tmp/fakemount.py\n              subPath: fakemount.py\n              
readOnly: true\n            {{- range $classConfig := $envAll.Values.conf.fake_mounts.classes }}\n              {{- range $bindMount := $classConfig.bindMounts }}\n            - mountPath: {{ $bindMount.srcRoot }}\n              mountPropagation: Bidirectional\n              name: {{ replace \"/\" \"\"  $bindMount.srcRoot }}\n              {{- end }}\n            - mountPath: {{ if $classConfig.mountDir }} {{- $classConfig.mountDir -}} {{ else }} {{- $classConfig.hostDir -}} {{ end }}\n              mountPropagation: Bidirectional\n              name: {{ $classConfig.name }}\n            {{- end }}\n            - mountPath: /run\n              name: run\n      containers:\n        - name: lvp\n{{ tuple $envAll \"local_volume_provisioner\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.local_volume_provisioner | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"local_volume_provisioner\" \"container\" \"lvp\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n          - name: MY_NODE_NAME\n            valueFrom:\n              fieldRef:\n                fieldPath: spec.nodeName\n          - name: NAMESPACE\n            valueFrom:\n              fieldRef:\n                apiVersion: v1\n                fieldPath: metadata.namespace\n          command:\n            - /local-provisioner\n          volumeMounts:\n            - name: local-volume-provisioner-bin\n              mountPath: /etc/provisioner/config/storageClassMap\n              subPath: storageClassMap\n              readOnly: true\n            - name: dev\n              mountPath: /dev\n            {{- range $classConfig := $envAll.Values.conf.fake_mounts.classes }}\n            - name: {{ $classConfig.name }}\n              mountPath: {{ $classConfig.mountDir | default $classConfig.hostDir }}\n              mountPropagation: 
HostToContainer\n            {{- end }}\n      volumes:\n        - name: fstab\n          hostPath:\n            type: File\n            path: /etc/fstab\n        - name: local-volume-provisioner-bin\n          configMap:\n            name: local-volume-provisioner-bin\n            defaultMode: 0555\n        - name: local-volume-provisioner-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: run\n          hostPath:\n            path: /run\n        - name: dev\n          hostPath:\n            path: /dev\n          {{- range $classConfig := $envAll.Values.conf.fake_mounts.classes }}\n            {{- range $bindMount := $classConfig.bindMounts }}\n        - name: {{ replace \"/\" \"\"  $bindMount.srcRoot }}\n          hostPath:\n            path: {{ $bindMount.srcRoot }}\n            type: \"\"\n            {{- end }}\n          {{- end }}\n        {{- range $classConfig := $envAll.Values.conf.fake_mounts.classes }}\n        - name: {{ $classConfig.name }}\n          hostPath:\n            path: {{ $classConfig.hostDir }}\n        {{- end }}\n{{ if $mounts_lvp.volumes }}{{ toYaml $mounts_lvp.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_local_volume_provisioner }}\n\n{{- $envAll := . 
}}\n{{- $daemonset := \"local_volume_provisioner\" }}\n{{- $configMapName := \"local-volume-provisioner-etc\" }}\n{{- $serviceAccountName := \"local-volume-provisioner\" }}\n\n{{ tuple $envAll \"lvp\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ $serviceAccountName }}-nodes\nrules:\n- apiGroups: [\"\"]\n  resources: [\"nodes\"]\n  verbs: [\"get\"]\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}-nodes\nsubjects:\n- kind: ServiceAccount\n  name: {{ $serviceAccountName }}\n  namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}-nodes\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}-cluster-admin\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: cluster-admin\nsubjects:\n- kind: ServiceAccount\n  name: {{ $serviceAccountName }}\n  namespace: {{ .Release.Namespace }}\n\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"lvp.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"lvp.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n\n{{- end }}\n"
  },
  {
    "path": "local-volume-provisioner/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "local-volume-provisioner/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"local-volume-provisioner\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "local-volume-provisioner/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "local-volume-provisioner/templates/storageclasses.yaml",
    "content": "{{- if .Values.manifests.storageclass }}\n{{- $envAll := . }}\n{{- range $val := $envAll.Values.conf.fake_mounts.classes }}\n{{- if $val.storageClass }}\n---\napiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: {{ $val.name }}\n  {{- if kindIs \"map\" $val.storageClass }}\n  {{- if $val.storageClass.isDefaultClass }}\n  annotations:\n    storageclass.kubernetes.io/is-default-class: \"true\"\n  {{- end }}\n  {{- end }}\n  labels:\n{{ tuple $envAll $envAll.Chart.Name \"storageclass\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nprovisioner: kubernetes.io/no-provisioner\nvolumeBindingMode: WaitForFirstConsumer\n{{- if kindIs \"map\" $val.storageClass }}\nreclaimPolicy: {{ $val.storageClass.reclaimPolicy | default \"Delete\" }}\n{{- else }}\nreclaimPolicy: Delete\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "local-volume-provisioner/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for local-volume-provisioner.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nlabels:\n  local_volume_provisioner:\n    node_selector_key: openstack-compute-node\n    node_selector_value: enabled\n\nimages:\n  tags:\n    local_volume_provisioner: mirantis.azurecr.io/bm/external/local-volume-provisioner:v2.4.0\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n    local_volume_provisioner_mounts: mirantis.azurecr.io/openstack/openstack-controller:0.1.1\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\ndependencies:\n  static: {}\n  dynamic: {}\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      local_volume_provisioner:\n        username: local_volume_provisioner\n        password: password\n    hosts:\n      default: localhost\n    
host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n\nconf:\n  fake_mounts:\n    classes:\n      - bindMounts:\n        - mounts:\n          - vol1\n          - vol2\n          - vol3\n          - vol4\n          - vol5\n          - vol6\n          - vol7\n          - vol8\n          - vol9\n          - vol10\n          - vol11\n          - vol12\n          - vol13\n          - vol14\n          - vol15\n          srcRoot: /var/lib/local-volume-provisioner\n        hostDir: /mnt/local-volume-provisioner\n        mountDir: /mnt/local-volume-provisioner\n        name: lvp-fake-root\n        storageClass: true\n        volumeMode: Filesystem\npod:\n  security_context:\n    local_volume_provisioner:\n      pod:\n        runAsUser: 0\n      container:\n        lvp:\n          privileged: true\n          readOnlyRootFilesystem: true\n        init_mounts:\n          privileged: true\n          readOnlyRootFilesystem: true\n  dns_policy: \"ClusterFirstWithHostNet\"\n  mounts:\n    local_volume_provisioner:\n      init_container: null\n      lvp: null\n  lifecycle:\n    upgrades:\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        local_volume_provisioner:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n  resources:\n    enabled: false\n    local_volume_provisioner:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  daemonset_local_volume_provisioner: true\n  job_image_repo_sync: true\n  secret_registry: true\n  storageclass: true\n\nsecrets:\n  oci_image_registry:\n    local_volume_provisioner: local-volume-provisioner-oci-image-registry-key\n\n# -- 
Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "magnum/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Magnum\nname: magnum\nversion: 2025.2.0\nhome: https://docs.openstack.org/magnum/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Magnum/OpenStack_Project_Magnum_vertical.png\nsources:\n  - https://opendev.org/openstack/magnum\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "magnum/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "magnum/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nmagnum-db-manage upgrade\n"
  },
  {
    "path": "magnum/templates/bin/_magnum-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec uwsgi --ini /etc/magnum/magnum-api-uwsgi.ini\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "magnum/templates/bin/_magnum-conductor-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\ntee /tmp/pod-shared/magnum.conf <<EOF\n[DEFAULT]\nhost = ${POD_NAME}\nEOF\n"
  },
  {
    "path": "magnum/templates/bin/_magnum-conductor.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexec magnum-conductor \\\n      --config-file /etc/magnum/magnum.conf \\\n      --config-file /tmp/pod-shared/magnum.conf\n"
  },
  {
    "path": "magnum/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: magnum-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  ks-domain-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_domain_user\" . | indent 4 }}\n  magnum-api.sh: |\n{{ tuple \"bin/_magnum-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  magnum-conductor.sh: |\n{{ tuple \"bin/_magnum-conductor.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  magnum-conductor-init.sh: |\n{{ tuple \"bin/_magnum-conductor-init.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.magnum.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.magnum.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.magnum.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.magnum.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.magnum.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.magnum.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.magnum.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.magnum.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.magnum.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.magnum.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.magnum.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.magnum.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.magnum.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.magnum.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.magnum.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.magnum.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.magnum.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.magnum.keystone_authtoken \"username\" .Values.endpoints.identity.auth.magnum.username -}}\n{{- end -}}\n{{- if empty .Values.conf.magnum.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.magnum.keystone_authtoken \"password\" .Values.endpoints.identity.auth.magnum.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.magnum.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.magnum.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.magnum.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.magnum.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.magnum.database.connection)) (empty .Values.conf.magnum.database.connection) -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"magnum\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\"| set .Values.conf.magnum.database \"connection\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.magnum.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"magnum\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.magnum.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.magnum.api.port -}}\n{{- $_ := tuple \"container-infra\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.magnum.api \"port\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.magnum.trust.trustee_domain_name -}}\n{{- $_ := set .Values.conf.magnum.trust \"trustee_domain_name\" .Values.endpoints.identity.auth.magnum_stack_user.domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.magnum.trust.trustee_domain_admin_name -}}\n{{- $_ := set .Values.conf.magnum.trust \"trustee_domain_admin_name\" .Values.endpoints.identity.auth.magnum_stack_user.username -}}\n{{- end -}}\n{{- if empty .Values.conf.magnum.trust.trustee_domain_admin_password -}}\n{{- $_ := set .Values.conf.magnum.trust \"trustee_domain_admin_password\" .Values.endpoints.identity.auth.magnum_stack_user.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.magnum_api_uwsgi.uwsgi.processes -}}\n{{- $_ := set .Values.conf.magnum_api_uwsgi.uwsgi \"processes\" .Values.conf.magnum.api.workers -}}\n{{- end -}}\n{{- if empty (index .Values.conf.magnum_api_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"container-infra\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.magnum_api_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .Release.Name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: magnum-etc\ntype: Opaque\ndata:\n  magnum.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.magnum | b64enc }}\n  magnum-api-uwsgi.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.magnum_api_uwsgi | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.paste | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n  {{- if .Values.conf.capi.enabled }}\n  kubeconfig.conf: {{ include \"kubeconfig.tpl\" . | b64enc }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . }}\n\n{{- $mounts_magnum_api := .Values.pod.mounts.magnum_api.magnum_api }}\n{{- $mounts_magnum_api_init := .Values.pod.mounts.magnum_api.init_container }}\n\n{{- $serviceAccountName := \"magnum-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: magnum-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"magnum\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"magnum\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"magnum\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"magnum_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ tuple \"magnum_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"magnum_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"magnum\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_magnum_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: magnum-api\n{{ tuple $envAll \"magnum_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            runAsUser: {{ .Values.pod.user.magnum.uid }}\n          command:\n            - /tmp/magnum-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/magnum-api.sh\n                  - stop\n          ports:\n            - name: m-api\n              containerPort: {{ tuple \"container-infra\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            httpGet:\n              scheme: HTTP\n              path: /\n              port: {{ tuple \"container-infra\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 5\n            periodSeconds: 10\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.magnum.oslo_concurrency.lock_path }}\n            - name: magnum-bin\n              mountPath: /tmp/magnum-api.sh\n              subPath: magnum-api.sh\n              readOnly: true\n            - name: pod-etc-magnum\n              mountPath: /etc/magnum\n            - name: pod-var-cache-magnum\n              mountPath: /var/cache/magnum\n            - name: magnum-etc\n              mountPath: /etc/magnum/magnum.conf\n              subPath: magnum.conf\n              readOnly: true\n            - name: magnum-etc\n              mountPath: /etc/magnum/magnum-api-uwsgi.ini\n              subPath: magnum-api-uwsgi.ini\n              readOnly: true\n            {{- if .Values.conf.magnum.DEFAULT.log_config_append }}\n            - name: magnum-etc\n              mountPath: {{ .Values.conf.magnum.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.magnum.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: magnum-etc\n              mountPath: /etc/magnum/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: magnum-etc\n              mountPath: /etc/magnum/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            {{- if .Values.conf.capi.enabled }}\n            - name: magnum-etc\n              mountPath: /etc/magnum/kubeconfig.conf\n              subPath: kubeconfig.conf\n              readOnly: true\n            {{- end }}\n{{ if $mounts_magnum_api.volumeMounts }}{{ toYaml $mounts_magnum_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n     
     emptyDir: {}\n        - name: pod-etc-magnum\n          emptyDir: {}\n        - name: pod-var-cache-magnum\n          emptyDir: {}\n        - name: magnum-bin\n          configMap:\n            name: magnum-bin\n            defaultMode: 0555\n        - name: magnum-etc\n          secret:\n            secretName: magnum-etc\n            defaultMode: 0444\n{{ if $mounts_magnum_api.volumes }}{{ toYaml $mounts_magnum_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "magnum/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendServiceType\" \"container-infra\" \"backendPort\" \"m-api\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.bootstrap\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"5\"\n{{- end }}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"magnum\" \"keystoneUser\" .Values.bootstrap.ks_user \"logConfigFile\" .Values.conf.magnum.DEFAULT.log_config_append \"jobAnnotations\" (include \"metadata.annotations.job.bootstrap\" . | fromYaml) -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"magnum\" -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"magnum\" \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"magnum\" \"podVolMounts\" .Values.pod.mounts.magnum_db_sync.magnum_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.magnum_db_sync.magnum_db_sync.volumes \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"magnum\" \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"magnum\" \"serviceTypes\" ( tuple \"container-infra\" ) \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"magnum\" \"serviceTypes\" ( tuple \"container-infra\" ) \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/job-ks-user-domain.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_user_domain }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"magnum-ks-user-domain\" }}\n{{ tuple $envAll \"ks_user\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: magnum-domain-ks-user\n  annotations:\n    \"helm.sh/hook\": post-install,post-upgrade\n    \"helm.sh/hook-delete-policy\": before-hook-creation\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"magnum\" \"ks-user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"ks_user\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: magnum-ks-domain-user\n{{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          command:\n            - /tmp/ks-domain-user.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ks-user-sh\n              mountPath: /tmp/ks-domain-user.sh\n      
        subPath: ks-domain-user.sh\n              readOnly: true\n          env:\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n            - name: SERVICE_OS_SERVICE_NAME\n              value: \"magnum\"\n            - name: SERVICE_OS_REGION_NAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.identity.magnum_stack_user }}\n                  key: OS_REGION_NAME\n            - name: SERVICE_OS_DOMAIN_NAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.identity.magnum_stack_user }}\n                  key: OS_DOMAIN_NAME\n            - name: SERVICE_OS_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.identity.magnum_stack_user }}\n                  key: OS_USERNAME\n            - name: SERVICE_OS_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.identity.magnum_stack_user }}\n                  key: OS_PASSWORD\n            - name: SERVICE_OS_ROLE\n              value: {{ .Values.endpoints.identity.auth.magnum_stack_user.role | quote }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: ks-user-sh\n          configMap:\n            name: magnum-bin\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"magnum\" \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"magnum\" \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/kubeconfig.tpl",
    "content": "{{- define \"kubeconfig.tpl\" }}\napiVersion: v1\nkind: Config\nclusters:\n- name: {{ .Values.conf.capi.clusterName }}\n  cluster:\n    server: {{ .Values.conf.capi.apiServer }}\n    certificate-authority-data: {{ .Values.conf.capi.certificateAuthorityData | quote }}\ncontexts:\n- name: {{ .Values.conf.capi.contextName }}\n  context:\n    cluster: {{ .Values.conf.capi.clusterName }}\n    user: {{ .Values.conf.capi.userName }}\ncurrent-context: {{ .Values.conf.capi.contextName }}\nusers:\n- name: {{ .Values.conf.capi.userName }}\n  user:\n    client-certificate-data: {{ .Values.conf.capi.clientCertificateData | quote }}\n    client-key-data: {{ .Values.conf.capi.clientKeyData | quote }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/network_policy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"magnum\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "magnum/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: magnum-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"magnum\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"magnum\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  DB_CONNECTION: {{ tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"magnum\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $envAll.Values.secrets.identity.magnum_stack_user }}\n  annotations:\n{{ tuple \"identity\" \"magnum_stack_user\" $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  OS_AUTH_URL: {{ tuple \"identity\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | b64enc }}\n  OS_REGION_NAME: {{ .Values.endpoints.identity.auth.magnum_stack_user.region_name | b64enc }}\n  OS_DOMAIN_NAME: {{ .Values.endpoints.identity.auth.magnum_stack_user.domain_name | b64enc }}\n  OS_USERNAME: {{ .Values.endpoints.identity.auth.magnum_stack_user.username | b64enc }}\n  OS_PASSWORD: {{ .Values.endpoints.identity.auth.magnum_stack_user.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"magnum\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass \"http\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"container-infra\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: m-api\n      port: {{ tuple \"container-infra\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"magnum\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"container-infra\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "magnum/templates/statefulset-conductor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.statefulset_conductor }}\n{{- $envAll := . }}\n\n{{- $mounts_magnum_conductor := .Values.pod.mounts.magnum_conductor.magnum_conductor }}\n{{- $mounts_magnum_conductor_init := .Values.pod.mounts.magnum_conductor.init_container }}\n\n{{- $serviceAccountName := \"magnum-conductor\" }}\n{{ tuple $envAll \"conductor\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: magnum-conductor\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"magnum\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: magnum-conductor\n  podManagementPolicy: \"Parallel\"\n  replicas: {{ .Values.pod.replicas.conductor }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"magnum\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"magnum\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n{{ tuple \"magnum_conductor\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"magnum_conductor\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"magnum\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.conductor.node_selector_key }}: {{ .Values.labels.conductor.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"conductor\" $mounts_magnum_conductor_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: magnum-conductor-init\n{{ tuple $envAll \"magnum_conductor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.conductor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            runAsUser: {{ .Values.pod.user.magnum.uid }}\n          env:\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n          command:\n            - /tmp/magnum-conductor-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: magnum-bin\n              mountPath: /tmp/magnum-conductor-init.sh\n              subPath: magnum-conductor-init.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n      containers:\n        - name: magnum-conductor\n{{ tuple $envAll \"magnum_conductor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.conductor | include 
\"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            runAsUser: {{ .Values.pod.user.magnum.uid }}\n          command:\n            - /tmp/magnum-conductor.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.magnum.oslo_concurrency.lock_path }}\n            - name: magnum-bin\n              mountPath: /tmp/magnum-conductor.sh\n              subPath: magnum-conductor.sh\n              readOnly: true\n            - name: pod-etc-magnum\n              mountPath: /etc/magnum\n            - name: magnum-etc\n              mountPath: /etc/magnum/magnum.conf\n              subPath: magnum.conf\n              readOnly: true\n            {{- if .Values.conf.magnum.DEFAULT.log_config_append }}\n            - name: magnum-etc\n              mountPath: {{ .Values.conf.magnum.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.magnum.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            {{- if .Values.conf.capi.enabled }}\n            - name: magnum-etc\n              mountPath: /etc/magnum/kubeconfig.conf\n              subPath: kubeconfig.conf\n              readOnly: true\n            {{- end }}\n            - name: magnum-etc\n              mountPath: /etc/magnum/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: magnum-certificate-cache\n              mountPath: {{ .Values.conf.magnum.cluster.temp_cache_dir }}\n{{ if $mounts_magnum_conductor.volumeMounts }}{{ toYaml $mounts_magnum_conductor.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-magnum\n          emptyDir: {}\n        - name: pod-shared\n  
        emptyDir: {}\n        - name: magnum-bin\n          configMap:\n            name: magnum-bin\n            defaultMode: 0555\n        - name: magnum-etc\n          secret:\n            secretName: magnum-etc\n            defaultMode: 0444\n        - name: magnum-certificate-cache\n          emptyDir: {}\n{{ if $mounts_magnum_conductor.volumes }}{{ toYaml $mounts_magnum_conductor.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "magnum/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for magnum.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  conductor:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    magnum_db_sync: quay.io/airshipit/magnum:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    magnum_api: quay.io/airshipit/magnum:2025.1-ubuntu_noble\n    magnum_conductor: quay.io/airshipit/magnum:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nconf:\n  capi:\n    
enabled: false\n    clusterName: cluster.local\n    apiServer: https://localhost:6443\n    certificateAuthorityData: null\n    contextName: default\n    userName: clusterUser\n    clientCertificateData: null\n    clientKeyData: null\n  paste:\n    pipeline:main:\n      pipeline: cors healthcheck request_id authtoken api_v1\n    app:api_v1:\n      paste.app_factory: magnum.api.app:app_factory\n    filter:authtoken:\n      acl_public_routes: /, /v1\n      paste.filter_factory: magnum.api.middleware.auth_token:AuthTokenMiddleware.factory\n    filter:request_id:\n      paste.filter_factory: oslo_middleware:RequestId.factory\n    filter:cors:\n      paste.filter_factory: oslo_middleware.cors:filter_factory\n      oslo_config_project: magnum\n    filter:healthcheck:\n      paste.filter_factory: oslo_middleware:Healthcheck.factory\n      backends: disable_by_file\n      disable_by_file_path: /etc/magnum/healthcheck_disable\n  policy: {}\n  magnum:\n    DEFAULT:\n      log_config_append: /etc/magnum/logging.conf\n      transport_url: null\n    cluster:\n      temp_cache_dir: /var/lib/magnum/certificate-cache\n    capi-helm:\n      kubeconfig_file: /etc/magnum/kubeconfig.conf\n    oslo_messaging_notifications:\n      driver: messaging\n    oslo_concurrency:\n      lock_path: /var/lock\n    oslo_policy:\n      policy_file: /etc/magnum/policy.yaml\n    certificates:\n      cert_manager_type: barbican\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. 
when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    trust:\n      trustee_domain_name: null\n    keystone_authtoken:\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n      service_type: container-infra\n    api:\n      # NOTE(portdirect): the bind port should not be defined, and is manipulated\n      # via the endpoints section.\n      port: null\n      host: 0.0.0.0\n  logging:\n    loggers:\n      keys:\n        - root\n        - magnum\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_magnum:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: magnum\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n  magnum_api_uwsgi:\n    uwsgi:\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      
die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      procname-prefix-spaced: \"magnum-api:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      wsgi-file: /var/lib/openstack/bin/magnum-api-wsgi\n      stats: 0.0.0.0:1717\n      stats-http: true\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30511\n\nbootstrap:\n  enabled: false\n  ks_user: magnum\n  script: |\n    openstack token issue\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - magnum-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - magnum-db-sync\n        - magnum-ks-user\n        - magnum-domain-ks-user\n        - magnum-ks-endpoints\n        - magnum-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: key_manager\n        - endpoint: internal\n          service: orchestration\n    conductor:\n      jobs:\n        - magnum-db-sync\n        - magnum-ks-user\n        - magnum-domain-ks-user\n        - magnum-ks-endpoints\n        - magnum-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: 
oslo_messaging\n        - endpoint: internal\n          service: key_manager\n        - endpoint: internal\n          service: orchestration\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - magnum-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    ks_endpoints:\n      jobs:\n        - magnum-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: magnum-keystone-admin\n    magnum: magnum-keystone-user\n    magnum_stack_user: magnum-keystone-stack-user\n  oslo_db:\n    admin: magnum-db-admin\n    magnum: magnum-db-user\n  oslo_messaging:\n    admin: magnum-rabbitmq-admin\n    magnum: magnum-rabbitmq-user\n  oci_image_registry:\n    magnum: magnum-oci-image-registry\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      magnum:\n        username: 
magnum\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      magnum:\n        role: admin\n        region_name: RegionOne\n        username: magnum\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      magnum_stack_user:\n        role: admin\n        region_name: RegionOne\n        username: magnum-domain\n        password: password\n        domain_name: magnum\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  container_infra:\n    name: magnum\n    hosts:\n      default: magnum-api\n      public: magnum\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v1\n    scheme:\n      default: http\n    port:\n      api:\n        default: 9511\n        public: 80\n  key_manager:\n    name: barbican\n    hosts:\n      default: barbican-api\n      public: barbican\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v1\n    scheme:\n      default: http\n    port:\n      api:\n        default: 9311\n        public: 80\n  orchestration:\n    name: heat\n    hosts:\n      default: heat-api\n      public: heat\n    host_fqdn_override:\n      default: null\n    path:\n      default: '/v1/%(project_id)s'\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 8004\n        public: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n      
magnum:\n        username: magnum\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /magnum\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n      magnum:\n        username: magnum\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /magnum\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n\npod:\n  user:\n    magnum:\n      uid: 42424\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  mounts:\n    magnum_api:\n      init_container: null\n      magnum_api:\n        volumeMounts:\n        volumes:\n    magnum_conductor:\n      init_container: null\n      magnum_conductor:\n        volumeMounts:\n        volumes:\n    magnum_bootstrap:\n      
init_container: null\n      magnum_bootstrap:\n        volumeMounts:\n        volumes:\n    magnum_db_sync:\n      magnum_db_sync:\n        volumeMounts:\n        volumes:\n  replicas:\n    api: 1\n    conductor: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    conductor:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: 
\"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\n\nnetwork_policy:\n  magnum:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  deployment_api: true\n  ingress_api: true\n  job_bootstrap: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_image_repo_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user_domain: true\n  job_ks_user: true\n  job_rabbit_init: true\n  pdb_api: true\n  network_policy: false\n  secret_db: true\n  secret_keystone: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_api: true\n  service_ingress_api: true\n  statefulset_conductor: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: 
osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "manila/.helmignore",
    "content": "values_overrides\n"
  },
  {
    "path": "manila/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Manila\nname: manila\nversion: 2025.2.0\nhome: https://docs.openstack.org/manila/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Manila/OpenStack_Project_Manila_vertical.png\nsources:\n  - https://opendev.org/openstack/manila\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "manila/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncd $HOME\n\n{{ range .Values.bootstrap.structured.images }}\nopenstack image show {{ .name  | quote }} || \\\n  (curl --fail -sSL -O {{ .source_url }}{{ .image_file }}; \\\n  openstack image create {{ .name | quote }} \\\n  {{ if .id -}} --id {{ .id }} {{ end -}} \\\n  --disk-format {{ .image_type }} \\\n  --file {{ .image_file }} \\\n  {{ if .properties -}} {{ range $key, $value := .properties }}--property {{$key}}={{$value}} {{ end }}{{ end -}} \\\n  --container-format {{ .container_format | quote }} \\\n  {{ if .private -}}\n  --private\n  {{- else -}}\n  --public\n  {{- end -}};)\n{{ end }}\n\n{{ range .Values.bootstrap.structured.flavors }}\nopenstack flavor show {{ .name  | quote }} || \\\n  openstack flavor create {{ .name | quote }} \\\n  {{ if .id -}} --id {{ .id }} {{ end -}} \\\n  --ram {{ .ram }} \\\n  --vcpus {{ .vcpus }} \\\n  --disk {{ .disk }} \\\n  --ephemeral {{ .ephemeral }} \\\n  {{ if .public -}}\n  --public\n  {{- else -}}\n  --private\n  {{- end -}};\n{{ end }}\n\nopenstack share type show default || \\\n  openstack share type create default true \\\n  --public true --description \"default generic share type\"\nopenstack share group type show default || \\\n  openstack share group type create default default --public true\n\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "manila/templates/bin/_ceph-keyring.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncat > /etc/ceph/ceph.client.${CEPHFS_AUTH_ID}.keyring <<EOF\n[client.${CEPHFS_AUTH_ID}]\n    key = $(cat /tmp/client-keyring)\nEOF\n"
  },
  {
    "path": "manila/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec manila-manage db sync\n"
  },
  {
    "path": "manila/templates/bin/_manila-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec uwsgi --ini /etc/manila/manila-api-uwsgi.ini\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "manila/templates/bin/_manila-data.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexec manila-data \\\n     --config-file /etc/manila/manila.conf \\\n     --config-dir /etc/manila/manila.conf.d\n"
  },
  {
    "path": "manila/templates/bin/_manila-scheduler.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexec manila-scheduler \\\n     --config-file /etc/manila/manila.conf \\\n     --config-dir /etc/manila/manila.conf.d\n"
  },
  {
    "path": "manila/templates/bin/_manila-share-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{- if and ( empty .Values.conf.manila.generic.service_network_host ) ( .Values.pod.use_fqdn.share ) }}\ntee > /tmp/pod-shared/manila-share-fqdn.conf << EOF\n[generic]\nservice_network_host = $(hostname --fqdn)\nEOF\n{{- end }}\n"
  },
  {
    "path": "manila/templates/bin/_manila-share.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexec manila-share \\\n     --config-file /etc/manila/manila.conf \\\n     --config-dir /etc/manila/manila.conf.d \\\n{{- if and ( empty .Values.conf.manila.generic.service_network_host ) ( .Values.pod.use_fqdn.share ) }}\n     --config-file /tmp/pod-shared/manila-share-fqdn.conf\n{{- end }}\n"
  },
  {
    "path": "manila/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $rallyTests := .Values.conf.rally_tests }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: manila-bin\ndata:\n  rally-test.sh: |\n{{ tuple $rallyTests | include \"helm-toolkit.scripts.rally_test\" | indent 4 }}\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n  manila-share-init.sh: |\n{{ tuple \"bin/_manila-share-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  manila-api.sh: |\n{{ tuple \"bin/_manila-api.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  manila-data.sh: |\n{{ tuple \"bin/_manila-data.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  manila-scheduler.sh: |\n{{ tuple \"bin/_manila-scheduler.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  manila-share.sh: |\n{{ tuple \"bin/_manila-share.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ceph-keyring.sh: |\n{{ tuple \"bin/_ceph-keyring.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- $manila_auth_url := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\n{{- $manila_region_name := .Values.endpoints.identity.auth.manila.region_name }}\n{{- $manila_project_name := .Values.endpoints.identity.auth.manila.project_name }}\n{{- $manila_project_domain_name := .Values.endpoints.identity.auth.manila.project_domain_name }}\n{{- $manila_user_domain_name := .Values.endpoints.identity.auth.manila.user_domain_name }}\n{{- $manila_username := .Values.endpoints.identity.auth.manila.username }}\n{{- $manila_password := .Values.endpoints.identity.auth.manila.password }}\n{{- $memcached_servers := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n{{- $memcache_secret_key := default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key }}\n\n{{- if empty .Values.conf.manila.keystone_authtoken.auth_uri -}}\n{{- $_ := set .Values.conf.manila.keystone_authtoken \"auth_uri\" $manila_auth_url -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.keystone_authtoken.auth_url -}}\n{{- $_ := set .Values.conf.manila.keystone_authtoken \"auth_url\" $manila_auth_url -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.manila.keystone_authtoken \"region_name\" $manila_region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.manila.keystone_authtoken \"project_name\" $manila_project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.manila.keystone_authtoken \"project_domain_name\" $manila_project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.manila.keystone_authtoken \"user_domain_name\" $manila_user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.manila.keystone_authtoken \"username\" $manila_username -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.manila.keystone_authtoken \"password\" $manila_password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.keystone_authtoken.memcached_servers -}}\n{{- $_ := set .Values.conf.manila.keystone_authtoken \"memcached_servers\" $memcached_servers -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.manila.keystone_authtoken \"memcache_secret_key\" $memcache_secret_key -}}\n{{- end -}}\n\n{{- if 
and (not (kindIs \"invalid\" .Values.conf.manila.database.connection)) (empty .Values.conf.manila.database.connection) -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"manila\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\"| set .Values.conf.manila.database \"connection\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"manila\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.manila.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n# neutron\n{{- if empty .Values.conf.manila.neutron.auth_uri -}}\n{{- $_ := set .Values.conf.manila.neutron \"auth_uri\" $manila_auth_url -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.neutron.auth_url -}}\n{{- $_ := set .Values.conf.manila.neutron \"auth_url\" $manila_auth_url -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.neutron.region_name -}}\n{{- $_ := set .Values.conf.manila.neutron \"region_name\" .Values.endpoints.identity.auth.manila.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.neutron.project_name -}}\n{{- $_ := set .Values.conf.manila.neutron \"project_name\" $manila_project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.neutron.project_domain_name -}}\n{{- $_ := set .Values.conf.manila.neutron \"project_domain_name\" $manila_project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.neutron.user_domain_name -}}\n{{- $_ := set .Values.conf.manila.neutron \"user_domain_name\" $manila_user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.neutron.username -}}\n{{- $_ := set .Values.conf.manila.neutron \"username\" $manila_username -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.neutron.password -}}\n{{- $_ := set .Values.conf.manila.neutron \"password\" $manila_password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.neutron.memcached_servers -}}\n{{- $_ := set 
.Values.conf.manila.neutron \"memcached_servers\" $memcached_servers -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.neutron.memcache_secret_key -}}\n{{- $_ := set .Values.conf.manila.neutron \"memcache_secret_key\" $memcache_secret_key -}}\n{{- end -}}\n\n# nova\n{{- if empty .Values.conf.manila.nova.auth_uri -}}\n{{- $_ := set .Values.conf.manila.nova \"auth_uri\" $manila_auth_url -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.nova.auth_url -}}\n{{- $_ := set .Values.conf.manila.nova \"auth_url\" $manila_auth_url -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.nova.region_name -}}\n{{- $_ := set .Values.conf.manila.nova \"region_name\" .Values.endpoints.identity.auth.manila.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.nova.project_name -}}\n{{- $_ := set .Values.conf.manila.nova \"project_name\" $manila_project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.nova.project_domain_name -}}\n{{- $_ := set .Values.conf.manila.nova \"project_domain_name\" $manila_project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.nova.user_domain_name -}}\n{{- $_ := set .Values.conf.manila.nova \"user_domain_name\" $manila_user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.nova.username -}}\n{{- $_ := set .Values.conf.manila.nova \"username\" $manila_username -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.nova.password -}}\n{{- $_ := set .Values.conf.manila.nova \"password\" $manila_password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.nova.memcached_servers -}}\n{{- $_ := set .Values.conf.manila.nova \"memcached_servers\" $memcached_servers -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.nova.memcache_secret_key -}}\n{{- $_ := set .Values.conf.manila.nova \"memcache_secret_key\" $memcache_secret_key -}}\n{{- end -}}\n\n# cinder\n{{- if empty .Values.conf.manila.cinder.auth_uri -}}\n{{- $_ := set .Values.conf.manila.cinder \"auth_uri\" $manila_auth_url -}}\n{{- end -}}\n\n{{- if empty 
.Values.conf.manila.cinder.auth_url -}}\n{{- $_ := set .Values.conf.manila.cinder \"auth_url\" $manila_auth_url -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.cinder.region_name -}}\n{{- $_ := set .Values.conf.manila.cinder \"region_name\" .Values.endpoints.identity.auth.manila.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.cinder.project_name -}}\n{{- $_ := set .Values.conf.manila.cinder \"project_name\" $manila_project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.cinder.project_domain_name -}}\n{{- $_ := set .Values.conf.manila.cinder \"project_domain_name\" $manila_project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.cinder.user_domain_name -}}\n{{- $_ := set .Values.conf.manila.cinder \"user_domain_name\" $manila_user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.cinder.username -}}\n{{- $_ := set .Values.conf.manila.cinder \"username\" $manila_username -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.cinder.password -}}\n{{- $_ := set .Values.conf.manila.cinder \"password\" $manila_password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.cinder.memcached_servers -}}\n{{- $_ := set .Values.conf.manila.cinder \"memcached_servers\" $memcached_servers -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.cinder.memcache_secret_key -}}\n{{- $_ := set .Values.conf.manila.cinder \"memcache_secret_key\" $memcache_secret_key -}}\n{{- end -}}\n\n# glance\n{{- if empty .Values.conf.manila.glance.auth_uri -}}\n{{- $_ := set .Values.conf.manila.glance \"auth_uri\" $manila_auth_url -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.glance.auth_url -}}\n{{- $_ := set .Values.conf.manila.glance \"auth_url\" $manila_auth_url -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.glance.region_name -}}\n{{- $_ := set .Values.conf.manila.glance \"region_name\" .Values.endpoints.identity.auth.manila.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.glance.project_name -}}\n{{- $_ := set 
.Values.conf.manila.glance \"project_name\" $manila_project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.glance.project_domain_name -}}\n{{- $_ := set .Values.conf.manila.glance \"project_domain_name\" $manila_project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.glance.user_domain_name -}}\n{{- $_ := set .Values.conf.manila.glance \"user_domain_name\" $manila_user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.glance.username -}}\n{{- $_ := set .Values.conf.manila.glance \"username\" $manila_username -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.glance.password -}}\n{{- $_ := set .Values.conf.manila.glance \"password\" $manila_password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila.glance.memcached_servers -}}\n{{- $_ := set .Values.conf.manila.glance \"memcached_servers\" $memcached_servers -}}\n{{- end -}}\n{{- if empty .Values.conf.manila.glance.memcache_secret_key -}}\n{{- $_ := set .Values.conf.manila.glance \"memcache_secret_key\" $memcache_secret_key -}}\n{{- end -}}\n\n{{- if empty .Values.conf.manila_api_uwsgi.uwsgi.processes -}}\n{{- $_ := set .Values.conf.manila_api_uwsgi.uwsgi \"processes\" .Values.conf.manila.DEFAULT.osapi_share_workers -}}\n{{- end -}}\n{{- if empty (index .Values.conf.manila_api_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"sharev2\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.manila_api_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .Release.Name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: manila-etc\ntype: Opaque\ndata:\n  rally_tests.yaml: {{ toYaml .Values.conf.rally_tests.tests | b64enc }}\n  manila.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.manila | b64enc }}\n  manila-api-uwsgi.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.manila_api_uwsgi | b64enc }}\n{{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n{{- $filePrefix := replace \"_\" \"-\"  $key }}\n  {{ printf \"%s.filters\" $filePrefix }}: {{ $value.content | b64enc }}\n{{- end }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.paste | b64enc 
}}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n  manila_sudoers: {{ $envAll.Values.conf.manila_sudoers | b64enc }}\n  rootwrap.conf: {{ $envAll.Values.conf.rootwrap | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . }}\n\n{{- $mounts_manila_api := .Values.pod.mounts.manila_api.manila_api }}\n{{- $mounts_manila_api_init := .Values.pod.mounts.manila_api.init_container }}\n{{- $etcSources := .Values.pod.etcSources.manila_api }}\n\n{{- $serviceAccountName := \"manila-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: manila-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"manila\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"manila\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"manila\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"manila_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"manila-api\" \"containerNames\" (list \"init\" \"manila-api\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"manila\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"manila_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"manila_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"manila\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.manila.enabled }}\n{{ tuple $envAll \"manila\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_manila_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: manila-api\n{{ tuple $envAll \"manila_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"manila\" \"container\" \"manila_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/manila-api.sh\n            - start\n          env:\n{{- if or 
.Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/manila/certs/ca.crt\"\n{{- end }}\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/manila-api.sh\n                  - stop\n          ports:\n            - name: m-api\n              containerPort: {{ tuple \"sharev2\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            httpGet:\n              scheme: HTTP\n              path: {{ tuple \"sharev2\" \"healthcheck\" \"\" . | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" }}\n              port: {{ tuple \"sharev2\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.manila.oslo_concurrency.lock_path }}\n            - name: state-tmp\n              mountPath: /var/lib/manila\n            - name: etcmanila\n              mountPath: /etc/manila\n            - name: manila-etc\n              mountPath: /etc/manila/manila.conf\n              subPath: manila.conf\n              readOnly: true\n            - name: manila-etc-snippets\n              mountPath: /etc/manila/manila.conf.d/\n              readOnly: true\n            - name: manila-etc\n              mountPath: /etc/manila/manila-api-uwsgi.ini\n              subPath: manila-api-uwsgi.ini\n              readOnly: true\n            {{- if .Values.conf.manila.DEFAULT.log_config_append }}\n            - name: manila-etc\n              mountPath: {{ .Values.conf.manila.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.manila.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: manila-etc\n              mountPath: 
/etc/manila/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: manila-etc\n              mountPath: /etc/manila/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: manila-bin\n              mountPath: /tmp/manila-api.sh\n              subPath: manila-api.sh\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.share.api.internal \"path\" \"/etc/manila/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n\n{{ if $mounts_manila_api.volumeMounts }}{{ toYaml $mounts_manila_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: state-tmp\n          emptyDir: {}\n        - name: etcmanila\n          emptyDir: {}\n        - name: manila-etc\n          secret:\n            secretName: manila-etc\n            defaultMode: 0444\n        - name: manila-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: manila-bin\n          configMap:\n            name: manila-bin\n            defaultMode: 0555\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 
}}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.share.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n\n{{ if $mounts_manila_api.volumes }}{{ toYaml $mounts_manila_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/deployment-data.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_data }}\n{{- $envAll := . }}\n\n{{- $mounts_manila_data := .Values.pod.mounts.manila_data.manila_data }}\n{{- $mounts_manila_data_init := .Values.pod.mounts.manila_data.init_container }}\n{{- $etcSources := .Values.pod.etcSources.manila_data }}\n\n{{- $serviceAccountName := \"manila-data\" }}\n{{ tuple $envAll \"data\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: manila-data\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"manila\" \"data\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.data }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"manila\" \"data\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"manila\" \"data\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"manila_data\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"manila-data\" \"containerNames\" (list \"init\" \"manila-data\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"manila\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"manila_data\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"manila_data\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"manila\" \"data\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.data.node_selector_key }}: {{ .Values.labels.data.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.manila.enabled }}\n{{ tuple $envAll \"manila\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"data\" $mounts_manila_data_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: manila-data\n{{ tuple $envAll \"manila_data\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.data | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"manila\" \"container\" \"manila_data\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/manila-data.sh\n          env:\n{{- if or 
.Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/manila/certs/ca.crt\"\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.manila.oslo_concurrency.lock_path }}\n            - name: etcmanila\n              mountPath: /etc/manila\n            - name: state-tmp\n              mountPath: /var/lib/manila\n            - name: manila-etc\n              mountPath: /etc/manila/manila.conf\n              subPath: manila.conf\n              readOnly: true\n            - name: manila-etc-snippets\n              mountPath: /etc/manila/manila.conf.d/\n              readOnly: true\n            {{- if .Values.conf.manila.DEFAULT.log_config_append }}\n            - name: manila-etc\n              mountPath: {{ .Values.conf.manila.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.manila.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: manila-etc\n              mountPath: /etc/manila/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: manila-bin\n              mountPath: /tmp/manila-data.sh\n              subPath: manila-data.sh\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.share.api.internal \"path\" \"/etc/manila/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | 
include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n\n{{ if $mounts_manila_data.volumeMounts }}{{ toYaml $mounts_manila_data.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: state-tmp\n          emptyDir: {}\n        - name: etcmanila\n          emptyDir: {}\n        - name: manila-etc\n          secret:\n            secretName: manila-etc\n            defaultMode: 0444\n        - name: manila-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: manila-bin\n          configMap:\n            name: manila-bin\n            defaultMode: 0555\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.share.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n\n{{ if $mounts_manila_data.volumes }}{{ toYaml $mounts_manila_data.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/deployment-scheduler.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_scheduler }}\n{{- $envAll := . }}\n\n{{- $mounts_manila_scheduler := .Values.pod.mounts.manila_scheduler.manila_scheduler }}\n{{- $mounts_manila_scheduler_init := .Values.pod.mounts.manila_scheduler.init_container }}\n{{- $etcSources := .Values.pod.etcSources.manila_scheduler }}\n\n{{- $serviceAccountName := \"manila-scheduler\" }}\n{{ tuple $envAll \"scheduler\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: manila-scheduler\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"manila\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.scheduler }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"manila\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"manila\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"manila_scheduler\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"manila-scheduler\" \"containerNames\" (list \"init\" \"manila-scheduler\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"manila\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"manila_scheduler\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"manila_scheduler\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"manila\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.scheduler.node_selector_key }}: {{ .Values.labels.scheduler.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.manila.enabled }}\n{{ tuple $envAll \"manila\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"scheduler\" $mounts_manila_scheduler_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: manila-scheduler\n{{ tuple $envAll \"manila_scheduler\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.scheduler | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"manila\" \"container\" \"manila_scheduler\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - 
/tmp/manila-scheduler.sh\n          env:\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/manila/certs/ca.crt\"\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.manila.oslo_concurrency.lock_path }}\n            - name: etcmanila\n              mountPath: /etc/manila\n            - name: state-tmp\n              mountPath: /var/lib/manila\n            - name: manila-etc\n              mountPath: /etc/manila/manila.conf\n              subPath: manila.conf\n              readOnly: true\n            - name: manila-etc-snippets\n              mountPath: /etc/manila/manila.conf.d/\n              readOnly: true\n            {{- if .Values.conf.manila.DEFAULT.log_config_append }}\n            - name: manila-etc\n              mountPath: {{ .Values.conf.manila.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.manila.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: manila-etc\n              mountPath: /etc/manila/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: manila-bin\n              mountPath: /tmp/manila-scheduler.sh\n              subPath: manila-scheduler.sh\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.share.api.internal \"path\" \"/etc/manila/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" 
$envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n\n{{ if $mounts_manila_scheduler.volumeMounts }}{{ toYaml $mounts_manila_scheduler.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: state-tmp\n          emptyDir: {}\n        - name: etcmanila\n          emptyDir: {}\n        - name: manila-etc\n          secret:\n            secretName: manila-etc\n            defaultMode: 0444\n        - name: manila-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: manila-bin\n          configMap:\n            name: manila-bin\n            defaultMode: 0555\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.share.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n\n{{ if $mounts_manila_scheduler.volumes }}{{ toYaml $mounts_manila_scheduler.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/deployment-share.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_share }}\n{{- $envAll := . }}\n\n{{- $mounts_manila_share := .Values.pod.mounts.manila_share.manila_share }}\n{{- $mounts_manila_share_init := .Values.pod.mounts.manila_share.init_container }}\n{{- $etcSources := .Values.pod.etcSources.manila_share }}\n\n{{- $serviceAccountName := \"manila-share\" }}\n{{ tuple $envAll \"share\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $enabledBackends := default \"\" .Values.conf.manila.DEFAULT.enabled_share_backends }}\n{{- $isCephfsEnabled := (contains \"cephfs\" $enabledBackends) }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: manila-share\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"manila\" \"share\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.share }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"manila\" \"share\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"manila\" \"share\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include 
\"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"manila_share\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"manila-share\" \"containerNames\" (list \"init\" \"manila-share\" \"manila-share-init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"manila\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"manila_share\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"manila_share\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"manila\" \"share\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.share.node_selector_key }}: {{ .Values.labels.share.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.manila.enabled }}\n{{ tuple $envAll \"manila\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      initContainers:\n{{ tuple $envAll \"share\" $mounts_manila_share_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: manila-share-init\n{{ tuple $envAll \"manila_share\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"manila\" \"container\" \"manila_share\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env: []\n          
command:\n            - /tmp/manila-share-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: manila-bin\n              mountPath: /tmp/manila-share-init.sh\n              subPath: manila-share-init.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n{{ if $mounts_manila_share.volumeMounts }}{{ toYaml $mounts_manila_share.volumeMounts | indent 12 }}{{ end }}\n        {{ if $isCephfsEnabled }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"manila_share\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"manila\" \"container\" \"ceph_keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: CEPHFS_AUTH_ID\n              value: {{ .Values.conf.manila.DEFAULT.cephfs_auth_id | quote }}\n          command:\n            - /tmp/ceph-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: manila-bin\n              mountPath: /tmp/ceph-keyring.sh\n              subPath: ceph-keyring.sh\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n        {{ end }}\n      containers:\n        - name: manila-share\n{{ tuple $envAll \"manila_share\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.share | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"manila\" \"container\" \"manila_share\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/manila-share.sh\n          env:\n{{- if or 
.Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/manila/certs/ca.crt\"\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.manila.oslo_concurrency.lock_path }}\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: etcmanila\n              mountPath: /etc/manila\n            - name: state-tmp\n              mountPath: /var/lib/manila\n            - name: manila-etc\n              mountPath: /etc/manila/manila.conf\n              subPath: manila.conf\n              readOnly: true\n            - name: manila-etc-snippets\n              mountPath: /etc/manila/manila.conf.d/\n              readOnly: true\n            - name: manila-etc\n              mountPath: /etc/manila/rootwrap.conf\n              subPath: rootwrap.conf\n            - name: manila-etc\n              mountPath: /etc/sudoers.d/kolla_manila_sudoers\n              subPath: manila_sudoers\n              readOnly: true\n            - name: manila-etc\n              mountPath: /etc/sudoers.d/kolla_manila_volume_sudoers\n              subPath: manila_sudoers\n              readOnly: true\n            - mountPath: /run/openvswitch\n              name: run-openvswitch\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"share\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/manila/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: manila-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            {{- if .Values.conf.manila.DEFAULT.log_config_append }}\n            - name: 
manila-etc\n              mountPath: {{ .Values.conf.manila.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.manila.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: manila-etc\n              mountPath: /etc/manila/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: manila-bin\n              mountPath: /tmp/manila-share.sh\n              subPath: manila-share.sh\n              readOnly: true\n{{- if $isCephfsEnabled }}\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n{{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.share.api.internal \"path\" \"/etc/manila/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n\n{{ if $mounts_manila_share.volumeMounts }}{{ toYaml $mounts_manila_share.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n        - name: state-tmp\n          emptyDir: {}\n        - name: etcmanila\n          emptyDir: {}\n        - name: 
run-openvswitch\n          hostPath:\n            path: /run/openvswitch\n            type: Directory\n        - name: manila-etc\n          secret:\n            secretName: manila-etc\n            defaultMode: 0444\n        - name: manila-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: manila-bin\n          configMap:\n            name: manila-bin\n            defaultMode: 0555\n{{- if $isCephfsEnabled }}\n        - name: etcceph\n          emptyDir: {}\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n            defaultMode: 0444\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.secrets.rbd | quote }}\n{{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.share.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n\n{{ if $mounts_manila_share.volumes }}{{ toYaml $mounts_manila_share.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "manila/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendServiceType\" \"sharev2\" \"backendPort\" \"m-api\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.bootstrap\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"5\"\n{{- end }}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"manila\" \"keystoneUser\" .Values.bootstrap.ks_user \"logConfigFile\" .Values.conf.manila.DEFAULT.log_config_append \"jobAnnotations\" (include \"metadata.annotations.job.bootstrap\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.manila.enabled -}}\n{{- $_ := set $bootstrapJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $bootstrapJob \"tlsSecret\" .Values.secrets.tls.share.api.internal -}}\n{{- end -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"manila\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbDropJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.manila.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"manila\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.manila.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"manila\" \"podVolMounts\" .Values.pod.mounts.manila_db_sync.manila_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.manila_db_sync.manila_db_sync.volumes -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbSyncJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.manila.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"manila\" \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.manila.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"manila\" \"serviceTypes\" ( tuple \"sharev2\" ) \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.manila.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.share.api.internal -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"manila\" \"serviceTypes\" ( tuple \"sharev2\" ) \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.manila.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.share.api.internal -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"manila\" \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.manila.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.share.api.internal -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"manila\" \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.manila.enabled -}}\n{{- $_ := set $rmqUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/network_policy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"manila\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "manila/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: manila-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"manila\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/pod-rally-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.pod_rally_test }}\n{{- $envAll := . }}\n\n{{- $mounts_tests := .Values.pod.mounts.manila_tests.manila_tests }}\n{{- $mounts_tests_init := .Values.pod.mounts.manila_tests.init_container }}\n\n{{- $serviceAccountName := print $envAll.deployment_name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ print $envAll.deployment_name \"-test\" }}\n  labels:\n{{ tuple $envAll \"manila\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"manila-test\" \"containerNames\" (list \"init\" \"manila-test\" \"manila-test-ks-user\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n  restartPolicy: Never\n{{ dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ 
.Values.labels.test.node_selector_value }}\n{{ tuple \"manila_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 2 }}\n{{ tuple \"manila_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n    - name: manila-test-ks-user\n{{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"manila_test_ks_user\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      command:\n        - /tmp/ks-user.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: manila-bin\n          mountPath: /tmp/ks-user.sh\n          subPath: ks-user.sh\n          readOnly: true\n{{- if and .Values.manifests.certificates .Values.secrets.tls.share.api.internal }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.share.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 8 }}\n{{- end }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" (and .Values.manifests.certificates .Values.secrets.tls.share.api.internal) }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_SERVICE_NAME\n          value: \"test\"\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_ROLE\n          value: {{ .Values.endpoints.identity.auth.test.role 
| quote }}\n  containers:\n    - name: manila-test\n{{ tuple $envAll \"test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"manila_test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6}}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" (and .Values.manifests.certificates .Values.secrets.tls.share.api.internal) }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: RALLY_ENV_NAME\n          value: {{.deployment_name}}\n      command:\n        - /tmp/rally-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: manila-etc\n          mountPath: /etc/rally/rally_tests.yaml\n          subPath: rally_tests.yaml\n          readOnly: true\n        - name: manila-bin\n          mountPath: /tmp/rally-test.sh\n          subPath: rally-test.sh\n          readOnly: true\n        - name: rally-db\n          mountPath: /var/lib/rally\n        - name: rally-work\n          mountPath: /home/rally/.rally\n{{- if and  .Values.manifests.certificates .Values.secrets.tls.share.api.internal }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.share.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\"  | indent 8 }}\n{{- end }}\n{{ if $mounts_tests.volumeMounts }}{{ toYaml $mounts_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: manila-etc\n      secret:\n        secretName: manila-etc\n        defaultMode: 0444\n    - name: 
manila-bin\n      configMap:\n        name: manila-bin\n        defaultMode: 0555\n    - name: rally-db\n      emptyDir: {}\n    - name: rally-work\n      emptyDir: {}\n{{- if and .Values.manifests.certificates .Values.secrets.tls.share.api.internal }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.share.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{- end }}\n{{ if $mounts_tests.volumes }}{{ toYaml $mounts_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"manila\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"sharev2\" ) }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"manila\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"manila\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass \"http\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"sharev2\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: m-api\n    port: {{ tuple \"sharev2\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n    nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"manila\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "manila/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"sharev2\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "manila/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for manila.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  data:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  scheduler:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  share:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nrelease_group: null\n\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    manila_db_sync: quay.io/airshipit/manila:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    manila_api: quay.io/airshipit/manila:2025.1-ubuntu_noble\n    manila_data: 
quay.io/airshipit/manila:2025.1-ubuntu_noble\n    manila_scheduler: quay.io/airshipit/manila:2025.1-ubuntu_noble\n    manila_share: quay.io/airshipit/manila:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\npod:\n  security_context:\n    manila:\n      pod:\n        runAsUser: 42424\n      container:\n        manila_api:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        manila_data:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        manila_scheduler:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        manila_share:\n          readOnlyRootFilesystem: true\n          privileged: true\n    test:\n      pod:\n        runAsUser: 42424\n      container:\n        manila_test:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  use_fqdn:\n    # NOTE: Setting the option here to true will cause use $(hostname --fqdn)\n    # as the host name by default. If the short name is desired\n    # $(hostname --short), set the option to false. 
Specifying a host in the\n    # manila.conf via the conf section will supersede the value of this option.\n    share: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    manila:\n      enabled: false\n      tolerations:\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n  mounts:\n    manila_api:\n      init_container: null\n      manila_api:\n        volumeMounts:\n        volumes:\n    manila_scheduler:\n      init_container: null\n      manila_scheduler:\n        volumeMounts:\n        volumes:\n    manila_data:\n      init_container: null\n      manila_data:\n        volumeMounts:\n        volumes:\n    manila_share:\n      init_container: null\n      manila_share:\n        volumeMounts:\n        volumes:\n    manila_bootstrap:\n      init_container: null\n      manila_bootstrap:\n        volumeMounts:\n        volumes:\n    manila_tests:\n      init_container: null\n      manila_tests:\n        volumeMounts:\n        volumes:\n    manila_db_sync:\n      manila_db_sync:\n        volumeMounts:\n        volumes:\n  # -- This allows users to add Kubernetes Projected Volumes to be mounted at /etc/manila/manila.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/daemonset/cronjob\n  ## https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    manila_api: []\n    manila_scheduler: []\n    manila_data: []\n    manila_share: []\n    manila_db_sync: []\n  replicas:\n    api: 1\n    data: 1\n    scheduler: 1\n    share: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        
min_available: 0\n      scheduler:\n        min_available: 0\n      share:\n        min_available: 0\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    data:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    scheduler:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    share:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          
memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30486\n\nnetwork_policy:\n  manila:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nbootstrap:\n  enabled: true\n  ks_user: admin\n  script: null\n  structured:\n    flavors:\n      manila-service-flavor:\n        id: 100\n        name: \"manila-service-flavor\"\n        ram: 512\n        vcpus: 1\n        disk: 5\n        ephemeral: 0\n        public: true\n    images:\n      manila-service-image:\n        id: null\n        name: \"manila-service-image\"\n        source_url: \"https://tarballs.opendev.org/openstack/manila-image-elements/images/\"\n        image_file: \"manila-service-image-master.qcow2\"\n        image_type: qcow2\n        container_format: bare\n        private: false\n\nceph_client:\n  configmap: ceph-etc\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - manila-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - manila-db-sync\n        - manila-ks-user\n        - manila-ks-endpoints\n        - manila-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          
service: oslo_messaging\n    data:\n      jobs:\n        - manila-db-sync\n        - manila-ks-user\n        - manila-ks-endpoints\n        - manila-rabbit-init\n    scheduler:\n      jobs:\n        - manila-db-sync\n        - manila-ks-user\n        - manila-ks-endpoints\n        - manila-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n    share:\n      # pod:\n      #   - requireSameNode: true\n      #     labels:\n      #       application: openvswitch\n      #       component: server\n      jobs:\n        - manila-db-sync\n        - manila-ks-user\n        - manila-ks-endpoints\n        - manila-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - manila-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    ks_endpoints:\n      jobs:\n        - manila-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n\nconf:\n  paste:\n    composite:osapi_share:\n      use: call:manila.api:root_app_factory\n      /: apiversions\n      /healthcheck: healthcheck\n      /v1: openstack_share_api\n  
    /v2: openstack_share_api_v2\n    composite:openstack_share_api:\n      use: call:manila.api.middleware.auth:pipeline_factory\n      noauth: cors faultwrap http_proxy_to_wsgi sizelimit osprofiler noauth api\n      keystone: cors faultwrap http_proxy_to_wsgi sizelimit osprofiler authtoken keystonecontext api\n      keystone_nolimit: cors faultwrap http_proxy_to_wsgi sizelimit osprofiler authtoken keystonecontext api\n    composite:openstack_share_api_v2:\n      use: call:manila.api.middleware.auth:pipeline_factory\n      noauth: cors faultwrap http_proxy_to_wsgi sizelimit osprofiler noauth apiv2\n      noauthv2: cors faultwrap http_proxy_to_wsgi sizelimit osprofiler noauthv2 apiv2\n      keystone: cors faultwrap http_proxy_to_wsgi sizelimit osprofiler authtoken keystonecontext apiv2\n      keystone_nolimit: cors faultwrap http_proxy_to_wsgi sizelimit osprofiler authtoken keystonecontext apiv2\n    filter:faultwrap:\n      paste.filter_factory: manila.api.middleware.fault:FaultWrapper.factory\n    filter:noauth:\n      paste.filter_factory: manila.api.middleware.auth:NoAuthMiddleware.factory\n    filter:noauthv2:\n      paste.filter_factory: manila.api.middleware.auth:NoAuthMiddlewarev2_60.factory\n    filter:sizelimit:\n      paste.filter_factory: oslo_middleware.sizelimit:RequestBodySizeLimiter.factory\n    filter:osprofiler:\n      paste.filter_factory: osprofiler.web:WsgiMiddleware.factory\n    filter:http_proxy_to_wsgi:\n      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory\n    app:api:\n      paste.app_factory: manila.api.v1.router:APIRouter.factory\n    app:apiv2:\n      paste.app_factory: manila.api.v2.router:APIRouter.factory\n    pipeline:apiversions:\n      pipeline: cors faultwrap http_proxy_to_wsgi osshareversionapp\n    app:osshareversionapp:\n      paste.app_factory: manila.api.versions:VersionsRouter.factory\n    filter:keystonecontext:\n      paste.filter_factory: 
manila.api.middleware.auth:ManilaKeystoneContext.factory\n    filter:authtoken:\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n    filter:cors:\n      paste.filter_factory: oslo_middleware.cors:filter_factory\n      oslo_config_project: manila\n    app:healthcheck:\n      paste.app_factory: oslo_middleware:Healthcheck.app_factory\n      backends: disable_by_file\n      disable_by_file_path: /etc/manila/healthcheck_disable\n  policy: {}\n  manila_sudoers: |\n    # This sudoers file supports rootwrap for both Kolla and LOCI Images.\n    Defaults !requiretty\n    Defaults secure_path=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin\"\n    manila ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/manila-rootwrap /etc/manila/rootwrap.conf *, /var/lib/openstack/bin/manila-rootwrap /etc/manila/rootwrap.conf *\n  rootwrap_filters:\n    share:\n      pods:\n        - share\n      content: |\n        # manila-rootwrap command filters for share nodes\n        # This file should be owned by (and only-writeable by) the root user\n\n        [Filters]\n        # manila/utils.py : 'chown', '%s', '%s'\n        chown: CommandFilter, chown, root\n        # manila/utils.py : 'cat', '%s'\n        cat: CommandFilter, cat, root\n\n        # manila/share/drivers/lvm.py: 'mkfs.ext4', '/dev/mapper/%s'\n        mkfs.ext4: CommandFilter, mkfs.ext4, root\n\n        # manila/share/drivers/lvm.py: 'mkfs.ext3', '/dev/mapper/%s'\n        mkfs.ext3: CommandFilter, mkfs.ext3, root\n\n        # manila/share/drivers/lvm.py: 'smbd', '-s', '%s', '-D'\n        smbd: CommandFilter, smbd, root\n        smb: CommandFilter, smb, root\n\n        # manila/share/drivers/lvm.py: 'rmdir', '%s'\n        rmdir: CommandFilter, rmdir, root\n\n        # manila/share/drivers/lvm.py: 'dd' 'count=0', 'if=%s' % srcstr, 'of=%s'\n        dd: CommandFilter, dd, root\n\n        # manila/share/drivers/lvm.py: 'fsck', '-pf', %s\n      
  fsck: CommandFilter, fsck, root\n\n        # manila/share/drivers/lvm.py: 'resize2fs', %s\n        resize2fs: CommandFilter, resize2fs, root\n\n        # manila/share/drivers/helpers.py: 'smbcontrol', 'all', 'close-share', '%s'\n        smbcontrol: CommandFilter, smbcontrol, root\n\n        # manila/share/drivers/helpers.py: 'net', 'conf', 'addshare', '%s', '%s', 'writeable=y', 'guest_ok=y\n        # manila/share/drivers/helpers.py: 'net', 'conf', 'delshare', '%s'\n        # manila/share/drivers/helpers.py: 'net', 'conf', 'setparm', '%s', '%s', '%s'\n        # manila/share/drivers/helpers.py: 'net', 'conf', 'getparm', '%s', 'hosts allow'\n        net: CommandFilter, net, root\n\n        # manila/share/drivers/helpers.py: 'cp', '%s', '%s'\n        cp: CommandFilter, cp, root\n\n        # manila/share/drivers/helpers.py: 'service', '%s', '%s'\n        service: CommandFilter, service, root\n\n        # manila/share/drivers/lvm.py: 'lvremove', '-f', \"%s/%s\n        lvremove: CommandFilter, lvremove, root\n\n        # manila/share/drivers/lvm.py: 'lvextend', '-L', '%sG''-n', %s\n        lvextend: CommandFilter, lvextend, root\n\n        # manila/share/drivers/lvm.py: 'lvcreate', '-L', %s, '-n', %s\n        lvcreate: CommandFilter, lvcreate, root\n\n        # manila/share/drivers/lvm.py: 'vgs', '--noheadings', '-o', 'name'\n        # manila/share/drivers/lvm.py: 'vgs', %s, '--rows', '--units', 'g'\n        vgs: CommandFilter, vgs, root\n\n        # manila/share/drivers/lvm.py: 'tune2fs', '-U', 'random', '%volume-snapshot%'\n        tune2fs: CommandFilter, tune2fs, root\n\n        # manila/share/drivers/generic.py: 'sed', '-i', '\\'/%s/d\\'', '%s'\n        sed: CommandFilter, sed, root\n\n        # manila/share/drivers/glusterfs.py: 'mkdir', '%s'\n        # manila/share/drivers/ganesha/manager.py: 'mkdir', '-p', '%s'\n        mkdir: CommandFilter, mkdir, root\n\n        # manila/share/drivers/glusterfs.py: 'rm', '-rf', '%s'\n        rm: CommandFilter, rm, root\n\n      
  # manila/share/drivers/glusterfs.py: 'mount', '-t', 'glusterfs', '%s', '%s'\n        # manila/share/drivers/glusterfs/glusterfs_native.py: 'mount', '-t', 'glusterfs', '%s', '%s'\n        mount: CommandFilter, mount, root\n\n        # manila/share/drivers/glusterfs.py: 'gluster', '--xml', 'volume', 'info', '%s'\n        # manila/share/drivers/glusterfs.py: 'gluster', 'volume', 'set', '%s', 'nfs.export-dir', '%s'\n        gluster: CommandFilter, gluster, root\n\n        # manila/network/linux/ip_lib.py: 'ip', 'netns', 'exec', '%s', '%s'\n        ip: CommandFilter, ip, root\n\n        # manila/network/linux/interface.py: 'ovs-vsctl', 'add-port', '%s', '%s'\n        ovs-vsctl: CommandFilter, ovs-vsctl, root\n\n        # manila/share/drivers/glusterfs/glusterfs_native.py: 'find', '%s', '-mindepth', '1', '!', '-path', '%s', '!', '-path', '%s', '-delete'\n        # manila/share/drivers/glusterfs/glusterfs_native.py: 'find', '%s', '-mindepth', '1', '-delete'\n        find: CommandFilter, find, root\n\n        # manila/share/drivers/glusterfs/glusterfs_native.py: 'umount', '%s'\n        umount: CommandFilter, umount, root\n\n        # GPFS commands\n        # manila/share/drivers/ibm/gpfs.py: 'mmgetstate', '-Y'\n        mmgetstate: CommandFilter, mmgetstate, root\n        # manila/share/drivers/ibm/gpfs.py: 'mmlsattr', '%s'\n        mmlsattr: CommandFilter, mmlsattr, root\n        # manila/share/drivers/ibm/gpfs.py: 'mmcrfileset', '%s', '%s', '--inode-space', 'new'\n        mmcrfileset: CommandFilter, mmcrfileset, root\n        # manila/share/drivers/ibm/gpfs.py: 'mmlinkfileset', '%s', '%s', '-J', '%s'\n        mmlinkfileset: CommandFilter, mmlinkfileset, root\n        # manila/share/drivers/ibm/gpfs.py: 'mmsetquota', '-j', '%s', '-h', '%s', '%s'\n        mmsetquota: CommandFilter, mmsetquota, root\n        # manila/share/drivers/ibm/gpfs.py: 'mmunlinkfileset', '%s', '%s', '-f'\n        mmunlinkfileset: CommandFilter, mmunlinkfileset, root\n        # 
manila/share/drivers/ibm/gpfs.py: 'mmdelfileset', '%s', '%s', '-f'\n        mmdelfileset: CommandFilter, mmdelfileset, root\n        # manila/share/drivers/ibm/gpfs.py: 'mmcrsnapshot', '%s', '%s', '-j', '%s'\n        mmcrsnapshot: CommandFilter, mmcrsnapshot, root\n        # manila/share/drivers/ibm/gpfs.py: 'mmdelsnapshot', '%s', '%s', '-j', '%s'\n        mmdelsnapshot: CommandFilter, mmdelsnapshot, root\n        # manila/share/drivers/ibm/gpfs.py: 'rsync', '-rp', '%s', '%s'\n        rsync: CommandFilter, rsync, root\n        # manila/share/drivers/ibm/gpfs.py: 'exportfs'\n        exportfs: CommandFilter, exportfs, root\n        # manila/share/drivers/ibm/gpfs.py: 'stat', '--format=%F', '%s'\n        stat: CommandFilter, stat, root\n        # manila/share/drivers/ibm/gpfs.py: 'df', '-P', '-B', '1', '%s'\n        df: CommandFilter, df, root\n        # manila/share/drivers/ibm/gpfs.py: 'chmod', '777', '%s'\n        chmod: CommandFilter, chmod, root\n        # manila/share/drivers/ibm/gpfs.py: 'mmnfs', 'export', '%s', '%s'\n        mmnfs: CommandFilter, mmnfs, root\n        # manila/share/drivers/ibm/gpfs.py: 'mmlsfileset', '%s', '-J', '%s', '-L'\n        mmlsfileset: CommandFilter, mmlsfileset, root\n        # manila/share/drivers/ibm/gpfs.py: 'mmchfileset', '%s', '-J', '%s', '-j', '%s'\n        mmchfileset: CommandFilter, mmchfileset, root\n        # manila/share/drivers/ibm/gpfs.py: 'mmlsquota', '-j', '-J', '%s', '%s'\n        mmlsquota: CommandFilter, mmlsquota, root\n\n        # manila/share/drivers/ganesha/manager.py: 'mv', '%s', '%s'\n        mv: CommandFilter, mv, root\n\n        # manila/share/drivers/ganesha/manager.py: 'mktemp', '-p', '%s', '-t', '%s'\n        mktemp: CommandFilter, mktemp, root\n\n        # manila/share/drivers/ganesha/manager.py:\n        shcat: RegExpFilter, sh, root, sh, -c, echo '((.|\\n)*)' > /.*\n\n        # manila/share/drivers/ganesha/manager.py:\n        dbus-addexport: RegExpFilter, dbus-send, root, dbus-send, --print-reply, 
--system, --dest=org\\.ganesha\\.nfsd, /org/ganesha/nfsd/ExportMgr, org\\.ganesha\\.nfsd\\.exportmgr\\.(Add|Remove)Export, .*, .*\n\n        # manila/share/drivers/ganesha/manager.py:\n        dbus-removeexport: RegExpFilter, dbus-send, root, dbus-send, --print-reply, --system, --dest=org\\.ganesha\\.nfsd, /org/ganesha/nfsd/ExportMgr, org\\.ganesha\\.nfsd\\.exportmgr\\.(Add|Remove)Export, .*\n\n        # manila/share/drivers/ganesha/manager.py:\n        dbus-updateexport: RegExpFilter, dbus-send, root, dbus-send, --print-reply, --system, --dest=org\\.ganesha\\.nfsd, /org/ganesha/nfsd/ExportMgr, org\\.ganesha\\.nfsd\\.exportmgr\\.UpdateExport, .*, .*\n\n        # manila/share/drivers/ganesha/manager.py:\n        rmconf: RegExpFilter, sh, root, sh, -c, rm -f /.*/\\*\\.conf$\n\n        # ZFS commands\n        # manila/share/drivers/zfsonlinux/driver.py\n        # manila/share/drivers/zfsonlinux/utils.py\n        zpool: CommandFilter, zpool, root\n\n        # manila/share/drivers/zfsonlinux/driver.py\n        # manila/share/drivers/zfsonlinux/utils.py\n        zfs: CommandFilter, zfs, root\n\n        # manila/share/drivers/zfsonlinux/driver.py\n        kill: CommandFilter, kill, root\n\n        # manila/data/utils.py: 'ls', '-pA1', '--group-directories-first', '%s'\n        ls: CommandFilter, ls, root\n\n        # manila/data/utils.py: 'touch', '--reference=%s', '%s'\n        touch: CommandFilter, touch, root\n\n        # manila/share/drivers/container/container.py: docker <whatever>\n        docker: CommandFilter, docker, root\n\n        # manila/share/drivers/container/container.py: brctl <whatever>\n        brctl: CommandFilter, brctl, root\n\n        # manila/share/drivers/container/storage_helper.py: e2fsck <whatever>\n        # manila/share/drivers/generic.py: e2fsck <whatever>\n        # manila/share/drivers/lvm.py: e2fsck <whatever>\n        e2fsck: CommandFilter, e2fsck, root\n\n        # manila/share/drivers/lvm.py: lvconvert --merge %s\n        lvconvert: 
CommandFilter, lvconvert, root\n\n        # manila/data/utils.py: 'sha256sum', '%s'\n        sha256sum: CommandFilter, sha256sum, root\n\n        # manila/utils.py: 'tee', '%s'\n        tee: CommandFilter, tee, root\n\n        # manila/share/drivers/container/storage_helper.py: lvs -o lv_size --noheadings --nosuffix --units g <device>\n        lvs: CommandFilter, lvs, root\n\n        # manila/share/drivers/container/storage_helper.py: lvrename --autobackup n <old_name> <new_name>\n        lvrename: CommandFilter, lvrename, root\n  rootwrap: |\n    # Configuration for manila-rootwrap\n    # This file should be owned by (and only-writeable by) the root user\n\n    [DEFAULT]\n    # List of directories to load filter definitions from (separated by ',').\n    # These directories MUST all be only writeable by root !\n    filters_path=/etc/manila/rootwrap.d,/usr/share/manila/rootwrap\n\n    # List of directories to search executables in, in case filters do not\n    # explicitly specify a full path (separated by ',')\n    # If not specified, defaults to system PATH environment variable.\n    # These directories MUST all be only writeable by root !\n    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin,/usr/lpp/mmfs/bin\n\n    # Enable logging to syslog\n    # Default value is False\n    use_syslog=False\n\n    # Which syslog facility to use.\n    # Valid values include auth, authpriv, syslog, user0, user1...\n    # Default value is 'syslog'\n    syslog_log_facility=syslog\n\n    # Which messages to log.\n    # INFO means log all usage\n    # ERROR means only log unsuccessful attempts\n    syslog_log_level=ERROR\n  manila:\n    DEFAULT:\n      default_share_type: default\n      default_share_group_type: default\n      share_name_template: share-%s\n      rootwrap_config: /etc/manila/rootwrap.conf\n      api_paste_config: /etc/manila/api-paste.ini\n      enabled_share_backends: generic\n      enabled_share_protocols: NFS\n      cephfs_auth_id: manila\n   
 keystone_authtoken:\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n      endpoint_type: internalURL\n      service_type: sharev2\n    neutron:\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n      endpoint_type: internalURL\n    nova:\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n      endpoint_type: internalURL\n    cinder:\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n      endpoint_type: internalURL\n    glance:\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n      endpoint_type: internalURL\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    generic:\n      share_backend_name: GENERIC\n      share_driver: manila.share.drivers.generic.GenericShareDriver\n      driver_handles_share_servers: true\n      # manila-service-flavor\n      service_instance_flavor_id: 100\n      service_image_name: manila-service-image\n      service_instance_user: manila\n      service_instance_password: manila\n      # # Module path to the Virtual Interface (VIF) driver class. This option\n      # # is used only by drivers operating in\n      # # `driver_handles_share_servers=True` mode that provision OpenStack\n      # # compute instances as share servers. This option is only supported\n      # # with Neutron networking. Drivers provided in tree work with Linux\n      # # Bridge (manila.network.linux.interface.BridgeInterfaceDriver) and\n      # # OVS (manila.network.linux.interface.OVSInterfaceDriver). 
If the\n      # # manila-share service is running on a host that is connected to the\n      # # administrator network, a no-op driver\n      # # (manila.network.linux.interface.NoopInterfaceDriver) may be used.\n      # # (string value)\n      # interface_driver: manila.network.linux.interface.OVSInterfaceDriver\n    oslo_policy:\n      policy_file: /etc/manila/policy.yaml\n    oslo_concurrency:\n      lock_path: /var/lock\n    oslo_messaging_notifications:\n      driver: messagingv2\n    oslo_middleware:\n      enable_proxy_headers_parsing: true\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: true\n  logging:\n    loggers:\n      keys:\n        - root\n        - manila\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_manila:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: manila\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n  rally_tests:\n    
tests:\n      ManilaShares.create_and_delete_share:\n        - args:\n            share_proto: \"nfs\"\n            size: 1\n            share_type: \"dhss_false\"\n            min_sleep: 1\n            max_sleep: 2\n          context:\n            quotas:\n              manila:\n                shares: 0\n                gigabytes: 0\n                share_networks: 0\n            users:\n              tenants: 2\n              users_per_tenant: 1\n              user_choice_method: \"round_robin\"\n            manila_share_networks:\n              use_share_networks: true\n          runner:\n            concurrency: 4\n            times: 4\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n  manila_api_uwsgi:\n    uwsgi:\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      procname-prefix-spaced: \"manila-api:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      module: \"manila.wsgi.api:application\"\n      stats: 0.0.0.0:1717\n      stats-http: true\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: manila-keystone-admin\n    manila: manila-keystone-user\n    test: manila-keystone-test\n  oslo_db:\n    admin: manila-db-admin\n    manila: manila-db-user\n  rbd: manila-keyring\n  oslo_messaging:\n    admin: manila-rabbitmq-admin\n    manila: manila-rabbitmq-user\n  tls:\n    share:\n      api:\n        public: manila-tls-public\n        internal: manila-tls-internal\n  oci_image_registry:\n    manila: manila-oci-image-registry\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    
hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      manila:\n        username: manila\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      manila:\n        role: admin\n        region_name: RegionOne\n        username: manila\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      test:\n        role: admin\n        region_name: RegionOne\n        username: manila-test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  sharev2:\n    name: manilav2\n    hosts:\n      default: manila-api\n      public: manila\n    host_fqdn_override:\n      default: null\n    path:\n      default: '/v2'\n      healthcheck: /healthcheck\n    scheme:\n      default: http\n      service: http\n    port:\n      api:\n        default: 8786\n        public: 80\n        service: 8786\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      manila:\n        username: 
manila\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /manila\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n        secret:\n          tls:\n            internal: rabbitmq-tls-direct\n      manila:\n        username: manila\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /manila\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress\n  # They are using to enable the Egress K8s network policy.\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns:\n        default: 53\n        protocol: UDP\n  ingress:\n    namespace: null\n    name: ingress\n    
hosts:\n      default: ingress\n    port:\n      ingress:\n        default: 80\n\ntls:\n  identity: false\n  oslo_messaging: false\n  oslo_db: false\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  deployment_api: true\n  deployment_scheduler: true\n  deployment_data: true\n  deployment_share: true\n  ingress_api: true\n  job_bootstrap: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_image_repo_sync: true\n  job_rabbit_init: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  pdb_api: true\n  pod_rally_test: true\n  secret_db: true\n  network_policy: false\n  secret_ingress_tls: true\n  secret_keystone: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_ingress_api: true\n  service_api: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "mariadb/.helmignore",
    "content": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation (prefixed with !). Only one pattern per line.\n.DS_Store\n# Common VCS dirs\n.git/\n.gitignore\n.bzr/\n.bzrignore\n.hg/\n.hgignore\n.svn/\n# Common backup files\n*.swp\n*.bak\n*.tmp\n*~\n# Various IDEs\n.project\n.idea/\n*.tmproj\n"
  },
  {
    "path": "mariadb/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v10.6.7\ndescription: OpenStack-Helm MariaDB\nname: mariadb\nversion: 2025.2.0\nhome: https://mariadb.com/kb/en/\nicon: http://badges.mariadb.org/mariadb-badge-180x60.png\nsources:\n  - https://github.com/MariaDB/server\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "mariadb/README.rst",
    "content": "openstack-helm/mariadb\n======================\n\nBy default, this chart creates a 3-member mariadb galera cluster.\n\nThis chart leverages StatefulSets, with persistent storage.\n\nIt creates a job that acts as a temporary standalone galera cluster.\nThis host is bootstrapped with authentication and then the WSREP\nbindings are exposed publicly. The cluster members being StatefulSets\nare provisioned one at a time. The first host must be marked as\n``Ready`` before the next host will be provisioned. This is determined\nby the readinessProbes which actually validate that MySQL is up and\nresponsive.\n\nThe configuration leverages xtrabackup-v2 for synchronization. This may\nlater be augmented to leverage rsync which has some benefits.\n\nOnce the seed job completes, which completes only when galera reports\nthat it is Synced and all cluster members are reporting in thus matching\nthe cluster count according to the job to the replica count in the helm\nvalues configuration, the job is terminated. When the job is no longer\nactive, future StatefulSets provisioned will leverage the existing\ncluster members as gcomm endpoints. It is only when the job is running\nthat the cluster members leverage the seed job as their gcomm endpoint.\nThis ensures you can restart members and scale the cluster.\n\nThe StatefulSets all leverage PVCs to provide stateful storage to\n``/var/lib/mysql``.\n\nYou must ensure that your control nodes that should receive mariadb\ninstances are labeled with ``openstack-control-plane=enabled``, or\nwhatever you have configured in values.yaml for the label\nconfiguration:\n\n::\n\n    kubectl label nodes openstack-control-plane=enabled --all\n"
  },
  {
    "path": "mariadb/templates/bin/_backup_mariadb.sh.tpl",
    "content": "#!/bin/bash\n\nSCOPE=${1:-\"all\"}\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nsource /tmp/backup_main.sh\n\n# Export the variables required by the framework\n# Note: REMOTE_BACKUP_ENABLED, STORAGE_POLICY  and CONTAINER_NAME are already\n#       exported.\nexport DB_NAMESPACE=${MARIADB_POD_NAMESPACE}\nexport DB_NAME=\"mariadb\"\nexport LOCAL_DAYS_TO_KEEP=${MARIADB_LOCAL_BACKUP_DAYS_TO_KEEP}\nexport REMOTE_DAYS_TO_KEEP=${MARIADB_REMOTE_BACKUP_DAYS_TO_KEEP}\nexport REMOTE_BACKUP_RETRIES=${NUMBER_OF_RETRIES_SEND_BACKUP_TO_REMOTE}\nexport MIN_DELAY_SEND_REMOTE=${MIN_DELAY_SEND_BACKUP_TO_REMOTE}\nexport MAX_DELAY_SEND_REMOTE=${MAX_DELAY_SEND_BACKUP_TO_REMOTE}\nexport ARCHIVE_DIR=${MARIADB_BACKUP_BASE_DIR}/db/${DB_NAMESPACE}/${DB_NAME}/archive\n\n# Dump all the database files to existing $TMP_DIR and save logs to $LOG_FILE\ndump_databases_to_directory() {\n  TMP_DIR=$1\n  LOG_FILE=$2\n  SCOPE=${3:-\"all\"}\n\n\n  MYSQL=\"mariadb \\\n     --defaults-file=/etc/mysql/admin_user.cnf \\\n     --connect-timeout 10\"\n\n  MYSQLDUMP=\"mariadb-dump \\\n     --defaults-file=/etc/mysql/admin_user.cnf\"\n\n  if [[ \"${SCOPE}\" == \"all\" ]]; then\n    MYSQL_DBNAMES=( $($MYSQL --silent --skip-column-names -e \\\n       \"show databases;\" | \\\n       grep -ivE 'information_schema|performance_schema|mysql|sys') )\n  else\n    if [[ \"${SCOPE}\" != \"information_schema\" && \"${SCOPE}\" != \"performance_schema\" && \"${SCOPE}\" != 
\"mysql\" && \"${SCOPE}\" != \"sys\" ]]; then\n      MYSQL_DBNAMES=( ${SCOPE} )\n    else\n      log ERROR \"It is not allowed to backup database ${SCOPE}.\"\n      return 1\n    fi\n  fi\n\n  #check if there is a database to backup, otherwise exit\n  if [[ -z \"${MYSQL_DBNAMES// }\" ]]\n  then\n    log INFO \"There is no database to backup\"\n    return 0\n  fi\n\n  #Create a list of Databases\n  printf \"%s\\n\" \"${MYSQL_DBNAMES[@]}\" > $TMP_DIR/db.list\n\n  if [[ \"${SCOPE}\" == \"all\" ]]; then\n    #Retrieve and create the GRANT file for all the users\n{{- if .Values.manifests.certificates }}\n    SSL_DSN=\";mysql_ssl=1\"\n    SSL_DSN=\"$SSL_DSN;mysql_ssl_client_key=/etc/mysql/certs/tls.key\"\n    SSL_DSN=\"$SSL_DSN;mysql_ssl_client_cert=/etc/mysql/certs/tls.crt\"\n    SSL_DSN=\"$SSL_DSN;mysql_ssl_ca_file=/etc/mysql/certs/ca.crt\"\n    if ! pt-show-grants --defaults-file=/etc/mysql/admin_user.cnf $SSL_DSN \\\n{{- else }}\n    if ! pt-show-grants --defaults-file=/etc/mysql/admin_user.cnf \\\n{{- end }}\n         2>>\"$LOG_FILE\" > \"$TMP_DIR\"/grants.sql; then\n      log ERROR \"Failed to create GRANT for all the users\"\n      return 1\n    fi\n  fi\n\n  #Retrieve and create the GRANT files per DB\n  for db in \"${MYSQL_DBNAMES[@]}\"\n  do\n    echo $($MYSQL --skip-column-names -e \"select concat('show grants for ',user,';') \\\n          from mysql.db where ucase(db)=ucase('$db');\") | \\\n          sed -r \"s/show grants for ([a-zA-Z0-9_-]*)/show grants for '\\1'/g\" | \\\n          $MYSQL --silent --skip-column-names 2>>$LOG_FILE > $TMP_DIR/${db}_grant.sql\n    if [ \"$?\" -eq 0 ]\n    then\n      sed -i 's/$/;/' $TMP_DIR/${db}_grant.sql\n    else\n      log ERROR \"Failed to create GRANT files for ${db}\"\n      return 1\n    fi\n  done\n\n  #Dumping the database\n\n  SQL_FILE=mariadb.$MARIADB_POD_NAMESPACE.${SCOPE}\n\n  $MYSQLDUMP $MYSQL_BACKUP_MYSQLDUMP_OPTIONS \"${MYSQL_DBNAMES[@]}\"  \\\n            > $TMP_DIR/${SQL_FILE}.sql 2>>$LOG_FILE\n  if [[ $? 
-eq 0 && -s $TMP_DIR/${SQL_FILE}.sql ]]\n  then\n    log INFO \"Database(s) dumped successfully. (SCOPE = ${SCOPE})\"\n    return 0\n  else\n    log ERROR \"Backup failed and need attention. (SCOPE = ${SCOPE})\"\n    return 1\n  fi\n}\n\n# functions from  mariadb-verifier chart\n\nget_time_delta_secs () {\n  second_delta=0\n  input_date_second=$( date --date=\"$1\" +%s )\n  if [ -n \"$input_date_second\" ]; then\n    current_date=$( date +\"%Y-%m-%dT%H:%M:%SZ\" )\n    current_date_second=$( date --date=\"$current_date\" +%s )\n    ((second_delta=current_date_second-input_date_second))\n    if [ \"$second_delta\" -lt 0 ]; then\n      second_delta=0\n    fi\n  fi\n  echo $second_delta\n}\n\n\ncheck_data_freshness () {\n  archive_file=$(basename \"$1\")\n  archive_date=$(echo \"$archive_file\" | cut -d'.' -f 4)\n  SCOPE=$2\n\n  if [[ \"${SCOPE}\" != \"all\" ]]; then\n    log \"Data freshness check is skipped for individual database.\"\n    return 0\n  fi\n\n  log \"Checking for data freshness in the backups...\"\n  # Get some idea of which database.table has changed in the last 30m\n  # Excluding the system DBs and aqua_test_database\n  #\n  changed_tables=$(${MYSQL_LIVE} -e \"select TABLE_SCHEMA,TABLE_NAME from \\\ninformation_schema.tables where UPDATE_TIME >= SUBTIME(now(),'00:30:00') AND TABLE_SCHEMA \\\nNOT IN('information_schema', 'mysql', 'performance_schema', 'sys', 'aqua_test_database');\" | \\\nawk '{print $1 \".\" $2}')\n\n  if [ -n \"${changed_tables}\" ]; then\n    delta_secs=$(get_time_delta_secs \"$archive_date\")\n    age_offset={{ .Values.conf.backup.validateData.ageOffset }}\n    ((age_threshold=delta_secs+age_offset))\n\n    data_freshness=false\n    skipped_freshness=false\n\n    for table in ${changed_tables}; do\n      tab_schema=$(echo \"$table\" | awk -F. '{print $1}')\n      tab_name=$(echo \"$table\" | awk -F. 
'{print $2}')\n\n      local_table_existed=$(${MYSQL_LOCAL_SHORT_SILENT} -e \"select TABLE_SCHEMA,TABLE_NAME from \\\nINFORMATION_SCHEMA.TABLES where TABLE_SCHEMA=\\\"${tab_schema}\\\" AND TABLE_NAME=\\\"${tab_name}\\\";\")\n\n      if [ -n \"$local_table_existed\" ]; then\n        # TODO: If last updated field of a table structure has different\n        # patterns (updated/timstamp), it may be worth to parameterize the patterns.\n        datetime=$(${MYSQL_LOCAL_SHORT_SILENT} -e \"describe ${table};\" | \\\n                   awk '(/updated/ || /timestamp/) && /datetime/ {print $1}')\n\n        if [ -n \"${datetime}\" ]; then\n          data_ages=$(${MYSQL_LOCAL_SHORT_SILENT} -e \"select \\\ntime_to_sec(timediff(now(),${datetime})) from ${table} where ${datetime} is not null order by 1 limit 10;\")\n\n          for age in $data_ages; do\n            if [ \"$age\" -le $age_threshold ]; then\n              data_freshness=true\n              break\n            fi\n          done\n\n          # As long as there is an indication of data freshness, no need to check further\n          if [ \"$data_freshness\" = true ] ; then\n            break\n          fi\n        else\n          skipped_freshness=true\n          log \"No indicator to determine data freshness for table $table. 
Skipped data freshness check.\"\n\n          # Dumping out table structure to determine if enhancement is needed to include this table\n          debug_info=$(${MYSQL_LOCAL} --skip-column-names -e \"describe ${table};\" | awk '{print $2 \" \" $1}')\n          log \"$debug_info\" \"DEBUG\"\n        fi\n      else\n        log \"Table $table doesn't exist in local database\"\n        skipped_freshness=true\n      fi\n    done\n\n    if [ \"$data_freshness\" = true ] ; then\n      log \"Database passed integrity (data freshness) check.\"\n    else\n      if [ \"$skipped_freshness\" = false ] ; then\n        log \"Local backup database restore failed integrity check.\" \"ERROR\"\n        log \"The backup may not have captured the up-to-date data.\" \"INFO\"\n        return 1\n      fi\n    fi\n  else\n    log \"No tables changed in this backup. Skipped data freshness check as the\"\n    log \"check should have been performed by previous validation runs.\"\n  fi\n\n  return 0\n}\n\n\ncleanup_local_databases () {\n  old_local_dbs=$(${MYSQL_LOCAL_SHORT_SILENT} -e 'show databases;' | \\\n    grep -ivE 'information_schema|performance_schema|mysql|sys' || true)\n\n  for db in $old_local_dbs; do\n    ${MYSQL_LOCAL_SHORT_SILENT} -e \"drop database $db;\"\n  done\n}\n\nlist_archive_dir () {\n  archive_dir_content=$(ls -1R \"$ARCHIVE_DIR\")\n  if [ -n \"$archive_dir_content\" ]; then\n    log \"Content of $ARCHIVE_DIR\"\n    log \"${archive_dir_content}\"\n  fi\n}\n\nremove_remote_archive_file () {\n  archive_file=$(basename \"$1\")\n  token_req_file=$(mktemp --suffix \".json\")\n  header_file=$(mktemp)\n  resp_file=$(mktemp --suffix \".json\")\n  http_resp=\"404\"\n\n  HEADER_CONTENT_TYPE=\"Content-Type: application/json\"\n  HEADER_ACCEPT=\"Accept: application/json\"\n\n  cat << JSON_EOF > \"$token_req_file\"\n{\n    \"auth\": {\n        \"identity\": {\n            \"methods\": [\n                \"password\"\n            ],\n            \"password\": {\n                
\"user\": {\n                    \"domain\": {\n                        \"name\": \"${OS_USER_DOMAIN_NAME}\"\n                    },\n                    \"name\": \"${OS_USERNAME}\",\n                    \"password\": \"${OS_PASSWORD}\"\n                }\n            }\n        },\n        \"scope\": {\n            \"project\": {\n                \"domain\": {\n                    \"name\": \"${OS_PROJECT_DOMAIN_NAME}\"\n                },\n                \"name\": \"${OS_PROJECT_NAME}\"\n            }\n        }\n    }\n}\nJSON_EOF\n\n  http_resp=$(curl -s -X POST \"$OS_AUTH_URL/auth/tokens\"  -H \"${HEADER_CONTENT_TYPE}\" \\\n       -H \"${HEADER_ACCEPT}\" -d @\"${token_req_file}\" -D \"$header_file\" -o \"$resp_file\" -w \"%{http_code}\")\n\n  if [ \"$http_resp\" = \"201\" ]; then\n    OS_TOKEN=$(grep -i \"x-subject-token\" \"$header_file\" | cut -d' ' -f2 | tr -d \"\\r\")\n\n    if [ -n \"$OS_TOKEN\" ]; then\n      OS_OBJ_URL=$(python3 -c \"import json,sys;print([[ep['url'] for ep in obj['endpoints'] if ep['interface']=='public'] for obj in json.load(sys.stdin)['token']['catalog'] if obj['type']=='object-store'][0][0])\" < \"$resp_file\")\n\n      if [ -n \"$OS_OBJ_URL\" ]; then\n        http_resp=$(curl -s -X DELETE \"$OS_OBJ_URL/$CONTAINER_NAME/$archive_file\" \\\n                         -H \"${HEADER_CONTENT_TYPE}\" -H \"${HEADER_ACCEPT}\" \\\n                         -H \"X-Auth-Token: ${OS_TOKEN}\" -D \"$header_file\" -o \"$resp_file\" -w \"%{http_code}\")\n      fi\n    fi\n  fi\n\n  if [ \"$http_resp\" == \"404\" ] ; then\n    log \"Failed to cleanup remote backup. Container object $archive_file is not on RGW.\"\n    return 1\n  fi\n\n  if [ \"$http_resp\" != \"204\" ] ; then\n    log \"Failed to cleanup remote backup. Cannot delete container object $archive_file\" \"ERROR\"\n    cat \"$header_file\"\n    cat \"$resp_file\"\n  fi\n  return 0\n}\n\nhandle_bad_archive_file () {\n  archive_file=$1\n\n  if [ ! 
-d \"$BAD_ARCHIVE_DIR\" ]; then\n    mkdir -p \"$BAD_ARCHIVE_DIR\"\n  fi\n\n  # Move the file to quarantine directory such that\n  # file won't be used for restore in case of recovery\n  #\n  log \"Moving $i to $BAD_ARCHIVE_DIR...\"\n  mv \"$i\" \"$BAD_ARCHIVE_DIR\"\n  log \"Removing $i from remote RGW...\"\n  if remove_remote_archive_file \"$i\"; then\n    log \"File $i has been successfully removed from RGW.\"\n  else\n    log \"File $i cannot be removed from RGW.\" \"ERROR\"\n    return 1\n  fi\n\n  # At most only three bad files are kept. Deleting the oldest if\n  # number of files exceeded the threshold.\n  #\n  bad_files=$(find \"$BAD_ARCHIVE_DIR\" -name \"*.tar.gz\" 2>/dev/null | wc -l)\n  if [ \"$bad_files\" -gt 3 ]; then\n    ((bad_files=bad_files-3))\n    delete_files=$(find \"$BAD_ARCHIVE_DIR\" -name \"*.tar.gz\" 2>/dev/null | sort | head --lines=$bad_files)\n    for b in $delete_files; do\n      log \"Deleting $b...\"\n      rm -f \"${b}\"\n    done\n  fi\n  return 0\n}\n\ncleanup_old_validation_result_file () {\n  clean_files=$(find \"$ARCHIVE_DIR\" -maxdepth 1 -name \"*.passed\" 2>/dev/null)\n  for d in $clean_files; do\n    archive_file=${d/.passed}\n    if [ ! -f \"$archive_file\" ]; then\n      log \"Deleting $d as its associated archive file $archive_file no longer exists.\"\n      rm -f \"${d}\"\n    fi\n  done\n}\n\nvalidate_databases_backup () {\n  archive_file=$1\n  SCOPE=${2:-\"all\"}\n\n  restore_log='/tmp/restore_error.log'\n  tmp_dir=$(mktemp -d)\n\n  rm -f $restore_log\n  cd \"$tmp_dir\"\n  log \"Decompressing archive $archive_file...\"\n  if ! tar zxvf - < \"$archive_file\" 1>/dev/null; then\n    log \"Database restore from local backup failed. 
Archive decompression failed.\" \"ERROR\"\n    return 1\n  fi\n\n  db_list_file=\"$tmp_dir/db.list\"\n  if [[ -e \"$db_list_file\" ]]; then\n    dbs=$(sort < \"$db_list_file\" | grep -ivE sys | tr '\\n' ' ')\n  else\n    dbs=\" \"\n  fi\n\n  sql_file=\"${tmp_dir}/mariadb.${MARIADB_POD_NAMESPACE}.${SCOPE}.sql\"\n\n  if [[ \"${SCOPE}\" == \"all\" ]]; then\n    grant_file=\"${tmp_dir}/grants.sql\"\n  else\n    grant_file=\"${tmp_dir}/${SCOPE}_grant.sql\"\n  fi\n\n  if [[ -f $sql_file ]]; then\n    if $MYSQL_LOCAL < \"$sql_file\" 2>$restore_log; then\n      local_dbs=$(${MYSQL_LOCAL_SHORT_SILENT} -e 'show databases;' | \\\n        grep -ivE 'information_schema|performance_schema|mysql|sys' | sort | tr '\\n' ' ')\n\n      if [ \"$dbs\" = \"$local_dbs\" ]; then\n        log \"Databases restored successful.\"\n      else\n        log \"Database restore from local backup failed. Database mismatched between local backup and local server\" \"ERROR\"\n        log \"Databases restored on local server: $local_dbs\" \"DEBUG\"\n        log \"Databases in the local backup: $dbs\" \"DEBUG\"\n        return 1\n      fi\n    else\n      log \"Database restore from local backup failed. $dbs\" \"ERROR\"\n      cat $restore_log\n      return 1\n    fi\n\n    if [[ -f $grant_file ]]; then\n      if $MYSQL_LOCAL < \"$grant_file\" 2>$restore_log; then\n        if ! $MYSQL_LOCAL -e 'flush privileges;'; then\n          log \"Database restore from local backup failed. Failed to flush privileges.\" \"ERROR\"\n          return 1\n        fi\n        log \"Databases permission restored successful.\"\n      else\n        log \"Database restore from local backup failed. Databases permission failed to restore.\" \"ERROR\"\n        cat \"$restore_log\"\n        cat \"$grant_file\"\n        log \"Local DBs: $local_dbs\" \"DEBUG\"\n        return 1\n      fi\n    else\n      log \"Database restore from local backup failed. 
There is no permission file available\" \"ERROR\"\n      return 1\n    fi\n\n    if ! check_data_freshness \"$archive_file\" ${SCOPE}; then\n      # Log has already generated during check data freshness\n      return 1\n    fi\n  else\n    log \"Database restore from local backup failed. There is no database file available to restore from\" \"ERROR\"\n    return 1\n  fi\n\n  return 0\n}\n\n# end of functions form mariadb verifier chart\n\n# Verify all the databases backup archives\nverify_databases_backup_archives() {\n  SCOPE=${1:-\"all\"}\n\n  # verification code\n  export DB_NAME=\"mariadb\"\n  export ARCHIVE_DIR=${MARIADB_BACKUP_BASE_DIR}/db/${MARIADB_POD_NAMESPACE}/${DB_NAME}/archive\n  export BAD_ARCHIVE_DIR=${ARCHIVE_DIR}/quarantine\n  export MYSQL_OPTS=\"--silent --skip-column-names\"\n  export MYSQL_LIVE=\"mariadb ${MYSQL_OPTS}\"\n  export MYSQL_LOCAL_OPTS=\"\"\n  export MYSQL_LOCAL_SHORT=\"mariadb ${MYSQL_LOCAL_OPTS} --connect-timeout 2\"\n  export MYSQL_LOCAL_SHORT_SILENT=\"${MYSQL_LOCAL_SHORT} ${MYSQL_OPTS}\"\n  export MYSQL_LOCAL=\"mariadb ${MYSQL_LOCAL_OPTS} --connect-timeout 10\"\n\n  max_wait={{ .Values.conf.mariadb_server.setup_wait.iteration }}\n  duration={{ .Values.conf.mariadb_server.setup_wait.duration }}\n  counter=0\n  dbisup=false\n\n  log \"Waiting for Mariadb backup verification server to start...\"\n\n  # During Mariadb init/startup process, a temporary server is startup\n  # and shutdown prior to starting up the normal server.\n  # To avoid prematurely determine server availability, lets snooze\n  # a bit to give time for the process to complete prior to issue\n  # mysql commands.\n  #\n\n\n  while [ $counter -lt $max_wait ]; do\n    if ! 
$MYSQL_LOCAL_SHORT -e 'select 1' > /dev/null 2>&1 ; then\n      sleep $duration\n      ((counter=counter+1))\n    else\n      # Lets sleep for an additional duration just in case async\n      # init takes a bit more time to complete.\n      #\n      sleep $duration\n      dbisup=true\n      counter=$max_wait\n    fi\n  done\n\n  if ! $dbisup; then\n    log \"Mariadb backup verification server is not running\" \"ERROR\"\n    return 1\n  fi\n\n  # During Mariadb init process, a test database will be briefly\n  # created and deleted. Adding to the exclusion list for some\n  # edge cases\n  #\n  clean_db=$(${MYSQL_LOCAL_SHORT_SILENT} -e 'show databases;' | \\\n    grep -ivE 'information_schema|performance_schema|mysql|test|sys' || true)\n\n  if [[ -z \"${clean_db// }\" ]]; then\n    log \"Clean Server is up and running\"\n  else\n    cleanup_local_databases\n    log \"Old databases found on the Mariadb backup verification server were cleaned.\"\n    clean_db=$(${MYSQL_LOCAL_SHORT_SILENT} -e 'show databases;' | \\\n      grep -ivE 'information_schema|performance_schema|mysql|test|sys' || true)\n\n    if [[ -z \"${clean_db// }\" ]]; then\n      log \"Clean Server is up and running\"\n    else\n      log \"Cannot clean old databases on verification server.\" \"ERROR\"\n      return 1\n    fi\n    log \"The server is ready for verification.\"\n  fi\n\n  # Starting with 10.4.13, new definer mariadb.sys was added. However, mariadb.sys was deleted\n  # during init mariadb as it was not on the exclusion list. This corrupted the view of mysql.user.\n  # Insert the tuple back to avoid other similar issues with error i.e\n  #   The user specified as a definer ('mariadb.sys'@'localhost') does not exist\n  #\n  # Before insert the tuple mentioned above, we should make sure that the MariaDB version is 10.4.+\n  mariadb_version=$($MYSQL_LOCAL_SHORT -e \"status\" | grep -E '^Server\\s+version:')\n  log \"Current database ${mariadb_version}\"\n  if [[ ! 
-z ${mariadb_version} && -z $(grep '10.2' <<< ${mariadb_version}) ]]; then\n    if [[ -z $(grep 'mariadb.sys' <<< $($MYSQL_LOCAL_SHORT mysql  -e \"select * from global_priv where user='mariadb.sys'\")) ]]; then\n      $MYSQL_LOCAL_SHORT -e \"insert into mysql.global_priv values ('localhost','mariadb.sys',\\\n    '{\\\"access\\\":0,\\\"plugin\\\":\\\"mysql_native_password\\\",\\\"authentication_string\\\":\\\"\\\",\\\"account_locked\\\":true,\\\"password_last_changed\\\":0}');\"\n      $MYSQL_LOCAL_SHORT -e 'flush privileges;'\n    fi\n  fi\n\n  # Ensure archive dir existed\n  if [ -d \"$ARCHIVE_DIR\" ]; then\n    # List archive dir before\n    list_archive_dir\n\n      # Ensure the local databases are clean for each restore validation\n      #\n      cleanup_local_databases\n\n      if [[ \"${SCOPE}\" == \"all\" ]]; then\n        archive_files=$(find \"$ARCHIVE_DIR\" -maxdepth 1 -name \"*.tar.gz\" 2>/dev/null | sort)\n        for i in $archive_files; do\n          archive_file_passed=$i.passed\n          if [ ! -f \"$archive_file_passed\" ]; then\n            log \"Validating archive file $i...\"\n            if validate_databases_backup \"$i\"; then\n              touch \"$archive_file_passed\"\n            else\n              if handle_bad_archive_file \"$i\"; then\n                log \"File $i has been removed from RGW.\"\n              else\n                log \"File $i cannot be removed from RGW.\" \"ERROR\"\n                return 1\n              fi\n            fi\n          fi\n        done\n      else\n        archive_files=$(find \"$ARCHIVE_DIR\" -maxdepth 1 -name \"*.tar.gz\" 2>/dev/null | grep \"${SCOPE}\" | sort)\n        for i in $archive_files; do\n          archive_file_passed=$i.passed\n          if [ ! 
-f \"$archive_file_passed\" ]; then\n            log \"Validating archive file $i...\"\n            if validate_databases_backup \"${i}\" \"${SCOPE}\"; then\n              touch \"$archive_file_passed\"\n            else\n              if handle_bad_archive_file \"$i\"; then\n                log \"File $i has been removed from RGW.\"\n              else\n                log \"File $i cannot be removed from RGW.\" \"ERROR\"\n                return 1\n              fi\n            fi\n          fi\n        done\n      fi\n\n\n    # Cleanup passed files if their archive file no longer exists\n    cleanup_old_validation_result_file\n\n    # List archive dir after\n    list_archive_dir\n  fi\n\n\n  return 0\n}\n\n# Call main program to start the database backup\nbackup_databases ${SCOPE}\n"
  },
  {
    "path": "mariadb/templates/bin/_health.sh.tpl",
    "content": "#!/usr/bin/env bash\n\n###########################################################################\n# Copyright 2017 The Openstack-Helm Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#########################################################################\n\nset -e\n\nMYSQL=\"mariadb \\\n  --defaults-file=/etc/mysql/admin_user.cnf \\\n  --host=localhost \\\n{{- if .Values.manifests.certificates }}\n  --ssl-verify-server-cert=false \\\n  --ssl-ca=/etc/mysql/certs/ca.crt \\\n  --ssl-key=/etc/mysql/certs/tls.key \\\n  --ssl-cert=/etc/mysql/certs/tls.crt \\\n{{- end }}\n  --connect-timeout 2\"\n\nmysql_query () {\n  TABLE=$1\n  KEY=$2\n  $MYSQL -e \"show ${TABLE} like \\\"${KEY}\\\"\" | \\\n    awk \"/${KEY}/ { print \\$NF; exit }\"\n}\n\nfunction usage {\n    echo \"Usage: $0 [-t <liveness|readiness>] [-d <percent>]\" 1>&2\n    exit 1\n}\n\nPROBE_TYPE=''\n\nwhile getopts \":t:d:\" opt; do\n  case $opt in\n    t)\n        PROBE_TYPE=$OPTARG\n        ;;\n    d)\n        DISK_ALARM_LIMIT=$OPTARG\n        ;;\n    *)\n        usage\n        ;;\n  esac\ndone\nshift $((OPTIND-1))\n\ncheck_readiness () {\n  if ! 
$MYSQL -e 'select 1' > /dev/null 2>&1 ; then\n      echo \"Select from mysql failed\"\n      exit 1\n  fi\n\n  DATADIR=$(mysql_query variables datadir)\n  TMPDIR=$(mysql_query variables tmpdir)\n  for partition in ${DATADIR} ${TMPDIR}; do\n      if [ \"$(df --output=pcent ${partition} | grep -Po '\\d+')\" -ge \"${DISK_ALARM_LIMIT:-100}\" ]; then\n          echo \"[ALARM] Critical high disk space utilization of ${partition}\"\n          exit 1\n      fi\n  done\n\n  if [ \"x$(mysql_query status wsrep_ready)\" != \"xON\" ]; then\n      echo \"WSREP says the node can not receive queries\"\n      exit 1\n  fi\n  if [ \"x$(mysql_query status wsrep_connected)\" != \"xON\" ]; then\n      echo \"WSREP not connected\"\n      exit 1\n  fi\n  if [ \"x$(mysql_query status wsrep_cluster_status)\" != \"xPrimary\" ]; then\n      echo \"Not in primary cluster\"\n      exit 1\n  fi\n  if [ \"x$(mysql_query status wsrep_local_state_comment)\" != \"xSynced\" ]; then\n      echo \"WSREP not synced\"\n      exit 1\n  fi\n}\n\ncheck_liveness () {\n  if pidof mariadb-upgrade > /dev/null 2>&1 ; then\n    echo \"The process mariadb-upgrade is active. Skip rest checks\"\n    exit 0\n  fi\n  if ! pidof mariadbd > /dev/null 2>&1 ; then\n    echo \"The mariadbd pid not found\"\n    exit 1\n  fi\n  # NOTE(mkarpin): SST process may take significant time in case of large databases,\n  # killing mysqld during SST may destroy all data on the node.\n  local datadir=\"/var/lib/mysql\"\n  if [ -f ${datadir}/sst_in_progress ]; then\n      echo \"SST is still in progress, skip further checks as mysql won't respond\"\n  else\n      # NOTE(vsaienko): in some cases maria might stuck during IST, or when neighbours\n      # IPs are changed. Here we check that we can connect to mysql socket to ensure\n      # process is alive.\n      if ! 
$MYSQL -e \"show status like 'wsrep_cluster_status'\" > /dev/null 2>&1 ; then\n          echo \"Can't connect to mysql socket\"\n          exit 1\n      fi\n      # Detect node that is not connected to wsrep provider\n      if [ \"x$(mysql_query status wsrep_ready)\" != \"xON\" ]; then\n          echo \"WSREP says the node can not receive queries\"\n          exit 1\n      fi\n      if [ \"x$(mysql_query status wsrep_connected)\" != \"xON\" ]; then\n          echo \"WSREP not connected\"\n          exit 1\n      fi\n  fi\n}\n\ncase $PROBE_TYPE in\n  liveness)\n      check_liveness\n      ;;\n  readiness)\n      check_readiness\n      ;;\n  *)\n      echo \"Unknown probe type: ${PROBE_TYPE}\"\n      usage\n      ;;\nesac\n"
  },
  {
    "path": "mariadb/templates/bin/_mariadb-wait-for-cluster.py.tpl",
    "content": "#!/usr/bin/env python3\n\nimport datetime\nfrom enum import Enum\nimport logging\nimport os\nimport sys\nimport time\n\nimport pymysql\nimport pykube\n\nMARIADB_HOST = os.getenv(\"MARIADB_HOST\")\nMARIADB_PASSWORD = os.getenv(\"MARIADB_PASSWORD\")\nMARIADB_REPLICAS = os.getenv(\"MARIADB_REPLICAS\")\n\nMARIADB_CLUSTER_STATE_LOG_LEVEL = os.getenv(\"MARIADB_CLUSTER_STATE_LOG_LEVEL\", \"INFO\")\n\nMARIADB_CLUSTER_STABILITY_COUNT = int(\n    os.getenv(\"MARIADB_CLUSTER_STABILITY_COUNT\", \"30\")\n)\nMARIADB_CLUSTER_STABILITY_WAIT = int(os.getenv(\"MARIADB_CLUSTER_STABILITY_WAIT\", \"4\"))\nMARIADB_CLUSTER_CHECK_WAIT = int(os.getenv(\"MARIADB_CLUSTER_CHECK_WAIT\", \"30\"))\n\nMARIADB_CLUSTER_STATE_CONFIGMAP = os.getenv(\"MARIADB_CLUSTER_STATE_CONFIGMAP\")\nMARIADB_CLUSTER_STATE_CONFIGMAP_NAMESPACE = os.getenv(\n    \"MARIADB_CLUSTER_STATE_CONFIGMAP_NAMESPACE\", \"openstack\"\n)\nMARIADB_CLUSTER_STATE_PYKUBE_REQUEST_TIMEOUT = int(\n    os.getenv(\"MARIADB_CLUSTER_STATE_PYKUBE_REQUEST_TIMEOUT\", 60)\n)\n\nlog_level = MARIADB_CLUSTER_STATE_LOG_LEVEL\nlogging.basicConfig(\n    stream=sys.stdout,\n    format=\"%(asctime)s %(levelname)s %(name)s %(message)s\",\n    datefmt=\"%Y-%m-%d %H:%M:%S\",\n)\nLOG = logging.getLogger(\"mariadb-cluster-wait\")\nLOG.setLevel(log_level)\n\n\ndef login():\n    config = pykube.KubeConfig.from_env()\n    client = pykube.HTTPClient(\n        config=config, timeout=MARIADB_CLUSTER_STATE_PYKUBE_REQUEST_TIMEOUT\n    )\n    LOG.info(f\"Created k8s api client from context {config.current_context}\")\n    return client\n\n\napi = login()\ncluster_state_map = (\n    pykube.ConfigMap.objects(api)\n    .filter(namespace=MARIADB_CLUSTER_STATE_CONFIGMAP_NAMESPACE)\n    .get_by_name(MARIADB_CLUSTER_STATE_CONFIGMAP)\n)\n\n\ndef get_current_state(cluster_state_map):\n    cluster_state_map.get(\n        MARIADB_CLUSTER_STATE_INITIAL_BOOTSTRAP_COMPLETED_KEY, \"False\"\n    )\n\n\ndef retry(times, exceptions):\n    def decorator(func):\n        
def newfn(*args, **kwargs):\n            attempt = 0\n            while attempt < times:\n                try:\n                    return func(*args, **kwargs)\n                except exceptions:\n                    attempt += 1\n                    LOG.exception(\n                        f\"Exception thrown when attempting to run {func}, attempt {attempt} of {times}\"\n                    )\n            return func(*args, **kwargs)\n        return newfn\n    return decorator\n\n\nclass initalClusterState:\n\n    initial_state_key = \"initial-bootstrap-completed.cluster\"\n\n    @retry(times=100, exceptions=(Exception))\n    def __init__(self, api, namespace, name):\n        self.namespace = namespace\n        self.name = name\n        self.cm = (\n            pykube.ConfigMap.objects(api)\n            .filter(namespace=self.namespace)\n            .get_by_name(self.name)\n        )\n\n    def get_default(self):\n        \"\"\"We have deployments with completed job, but it is not reflected\n        in the configmap state. Assume when configmap is created more than\n        1h and we doing update/restart, and key not in map this is\n        existed environment. 
So we assume the cluster was initialy bootstrapped.\n        This is needed to avoid manual actions.\n        \"\"\"\n        now = datetime.datetime.utcnow()\n        created_at = datetime.datetime.strptime(\n            self.cm.obj[\"metadata\"][\"creationTimestamp\"], \"%Y-%m-%dT%H:%M:%SZ\"\n        )\n        delta = datetime.timedelta(seconds=3600)\n\n        if now - created_at > delta:\n            self.complete()\n            return \"COMPLETED\"\n        return \"NOT_COMPLETED\"\n\n    @property\n    @retry(times=10, exceptions=(Exception))\n    def is_completed(self):\n\n        self.cm.reload()\n        if self.initial_state_key in self.cm.obj[\"data\"]:\n            return self.cm.obj[\"data\"][self.initial_state_key]\n\n        return self.get_default() == \"COMPLETED\"\n\n    @retry(times=100, exceptions=(Exception))\n    def complete(self):\n        patch = {\"data\": {self.initial_state_key: \"COMPLETED\"}}\n        self.cm.patch(patch)\n\n\nics = initalClusterState(\n    api, MARIADB_CLUSTER_STATE_CONFIGMAP_NAMESPACE, MARIADB_CLUSTER_STATE_CONFIGMAP\n)\n\nif ics.is_completed:\n    LOG.info(\"The initial bootstrap was completed, skipping wait...\")\n    sys.exit(0)\n\nLOG.info(\"Checking for mariadb cluster state.\")\n\n\ndef is_mariadb_stabe():\n    try:\n        wsrep_OK = {\n            \"wsrep_ready\": \"ON\",\n            \"wsrep_connected\": \"ON\",\n            \"wsrep_cluster_status\": \"Primary\",\n            \"wsrep_local_state_comment\": \"Synced\",\n            \"wsrep_cluster_size\": str(MARIADB_REPLICAS),\n        }\n        wsrep_vars = \",\".join([\"'\" + var + \"'\" for var in wsrep_OK.keys()])\n        db_cursor = pymysql.connect(\n            host=MARIADB_HOST, password=MARIADB_PASSWORD,\n            read_default_file=\"/etc/mysql/admin_user.cnf\"\n        ).cursor()\n        db_cursor.execute(f\"SHOW GLOBAL STATUS WHERE Variable_name IN ({wsrep_vars})\")\n        wsrep_vars = db_cursor.fetchall()\n        diff = 
set(wsrep_vars).difference(set(wsrep_OK.items()))\n        if diff:\n            LOG.error(f\"The wsrep is not OK: {diff}\")\n        else:\n            LOG.info(\"The wsrep is ready\")\n            return True\n    except Exception as e:\n        LOG.exception(f\"Got exception while checking state. {e}\")\n    return False\n\n\ncount = 0\nready = False\nstable_for = 1\n\nwhile True:\n    if is_mariadb_stabe():\n        stable_for += 1\n        LOG.info(\n            f\"The cluster is stable for {stable_for} out of {MARIADB_CLUSTER_STABILITY_COUNT}\"\n        )\n        if stable_for == MARIADB_CLUSTER_STABILITY_COUNT:\n            ics.complete()\n            sys.exit(0)\n        else:\n            LOG.info(f\"Sleeping for {MARIADB_CLUSTER_STABILITY_WAIT}\")\n            time.sleep(MARIADB_CLUSTER_STABILITY_WAIT)\n            continue\n    else:\n        LOG.info(\"Resetting stable_for count.\")\n        stable_for = 0\n    LOG.info(f\"Sleeping for {MARIADB_CLUSTER_CHECK_WAIT}\")\n    time.sleep(MARIADB_CLUSTER_CHECK_WAIT)\n"
  },
  {
    "path": "mariadb/templates/bin/_mariadb_controller.py.tpl",
    "content": "#!/usr/bin/env python3\n\n\"\"\"\nMariadb controller\n\nThe script is responsible for setting mariadb_role: primary on the first\nactive pod in mariadb deployment.\n\nEnv variables:\nMARIADB_CONTROLLER_DEBUG: Flag to enable debug when set to 1.\nMARIADB_CONTROLLER_CHECK_PODS_DELAY: The delay between check pod attempts.\nMARIADB_CONTROLLER_PYKUBE_REQUEST_TIMEOUT: The timeout for kubernetes http session\nMARIADB_CONTROLLER_PODS_NAMESPACE: The namespace to look for mariadb pods.\nMARIADB_MASTER_SERVICE_NAME: The name of master service for mariadb.\n\nChangelog:\n0.1.0: Initial version\n\"\"\"\n\n\nimport logging\nimport os\nimport sys\nimport time\n\nimport pykube\n\nMARIADB_CONTROLLER_DEBUG = os.getenv(\"MARIADB_CONTROLLER_DEBUG\")\nMARIADB_CONTROLLER_CHECK_PODS_DELAY = int(\n    os.getenv(\"MARIADB_CONTROLLER_CHECK_PODS_DELAY\", 10)\n)\nMARIADB_CONTROLLER_PYKUBE_REQUEST_TIMEOUT = int(\n    os.getenv(\"MARIADB_CONTROLLER_PYKUBE_REQUEST_TIMEOUT\", 60)\n)\nMARIADB_CONTROLLER_PODS_NAMESPACE = os.getenv(\n    \"MARIADB_CONTROLLER_PODS_NAMESPACE\", \"openstack\"\n)\nMARIADB_MASTER_SERVICE_NAME = os.getenv(\n    \"MARIADB_MASTER_SERVICE_NAME\", \"mariadb\"\n)\n\nlog_level = \"DEBUG\" if MARIADB_CONTROLLER_DEBUG else \"INFO\"\nlogging.basicConfig(\n    stream=sys.stdout,\n    format=\"%(asctime)s %(levelname)s %(name)s %(message)s\",\n    datefmt=\"%Y-%m-%d %H:%M:%S\",\n)\nLOG = logging.getLogger(\"mariadb-controller\")\n\nLOG.setLevel(log_level)\n\n\ndef login():\n    config = pykube.KubeConfig.from_env()\n    client = pykube.HTTPClient(\n        config=config, timeout=MARIADB_CONTROLLER_PYKUBE_REQUEST_TIMEOUT\n    )\n    LOG.info(f\"Created k8s api client from context {config.current_context}\")\n    return client\n\n\napi = login()\n\n\ndef resource_list(klass, selector, namespace=None):\n    return klass.objects(api).filter(namespace=namespace, selector=selector)\n\n\ndef get_mariadb_pods():\n    sorted_pods = sorted(\n        resource_list(\n            
pykube.Pod,\n            {\"application\": \"mariadb\", \"component\": \"server\"},\n            MARIADB_CONTROLLER_PODS_NAMESPACE,\n        ).iterator(),\n        key=lambda i: i.name,\n    )\n    return sorted_pods\n\n\ndef get_mariadb_master_service(namespace):\n    return pykube.Service.objects(api).filter(namespace=namespace).get(name=MARIADB_MASTER_SERVICE_NAME)\n\n\ndef link_master_service(pod):\n    svc = get_mariadb_master_service(MARIADB_CONTROLLER_PODS_NAMESPACE)\n    svc.reload()\n    if svc.obj['spec']['selector'].get('statefulset.kubernetes.io/pod-name') == pod.name:\n        LOG.debug(f\"Nothing to do, master service points to {pod.name}\")\n    else:\n        svc.obj['spec']['selector']['statefulset.kubernetes.io/pod-name'] = pod.name\n        svc.update()\n        LOG.info(f\"Link master service with {pod.name}\")\n\n\ndef is_ready(pod):\n    if pod.ready and \"deletionTimestamp\" not in pod.metadata:\n        return True\n\n\ndef main():\n    while True:\n        for pod in get_mariadb_pods():\n            pod.reload()\n            if is_ready(pod):\n                link_master_service(pod)\n                break\n        LOG.debug(f\"Sleeping for {MARIADB_CONTROLLER_CHECK_PODS_DELAY}\")\n        time.sleep(MARIADB_CONTROLLER_CHECK_PODS_DELAY)\n\n\nmain()\n"
  },
  {
    "path": "mariadb/templates/bin/_prometheus-create-mysql-user.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -e\n\n  # SLAVE MONITOR\n  # Grants ability to SHOW SLAVE STATUS, SHOW REPLICA STATUS,\n  # SHOW ALL SLAVES STATUS, SHOW ALL REPLICAS STATUS, SHOW RELAYLOG EVENTS.\n  # New privilege added in MariaDB Enterprise Server 10.5.8-5. Alias for REPLICA MONITOR.\n  #\n  # REPLICATION CLIENT\n  # Grants ability to SHOW MASTER STATUS, SHOW SLAVE STATUS, SHOW BINARY LOGS. In ES10.5,\n  # is an alias for BINLOG MONITOR and the capabilities have changed. BINLOG MONITOR grants\n  # ability to SHOW MASTER STATUS, SHOW BINARY LOGS, SHOW BINLOG EVENTS, and SHOW BINLOG STATUS.\n\n  mariadb_version=$(mariadb --defaults-file=/etc/mysql/admin_user.cnf -e \"status\" | grep -E '^Server\\s+version:')\n  echo \"Current database ${mariadb_version}\"\n\n  if [[ ! -z ${mariadb_version} && -z $(grep -E '10.2|10.3|10.4' <<< ${mariadb_version}) ]]; then\n    # In case MariaDB version is 10.2.x-10.4.x - we use old privileges definitions\n    if ! 
mariadb --defaults-file=/etc/mysql/admin_user.cnf -e \\\n      \"CREATE OR REPLACE USER '${EXPORTER_USER}'@'127.0.0.1' IDENTIFIED BY '${EXPORTER_PASSWORD}'; \\\n      GRANT SLAVE MONITOR, PROCESS, BINLOG MONITOR, SELECT ON *.* TO '${EXPORTER_USER}'@'127.0.0.1'; \\\n      FLUSH PRIVILEGES;\" ; then\n      echo \"ERROR: Could not create user: ${EXPORTER_USER}\"\n      exit 1\n    fi\n  else\n    # here we use new MariaDB privileges definitions defined since version 10.5\n    if ! mariadb --defaults-file=/etc/mysql/admin_user.cnf -e \\\n      \"CREATE OR REPLACE USER '${EXPORTER_USER}'@'127.0.0.1' IDENTIFIED BY '${EXPORTER_PASSWORD}'; \\\n      GRANT SLAVE MONITOR, PROCESS, REPLICATION CLIENT, SELECT ON *.* TO '${EXPORTER_USER}'@'127.0.0.1'  ${MARIADB_X509}; \\\n      FLUSH PRIVILEGES;\" ; then\n      echo \"ERROR: Could not create user: ${EXPORTER_USER}\"\n      exit 1\n    fi\n  fi\n"
  },
  {
    "path": "mariadb/templates/bin/_prometheus-mysqld-exporter.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\ncompareVersions() {\necho $1 $2 | \\\nawk '{ split($1, a, \".\");\n       split($2, b, \".\");\n       res = -1;\n       for (i = 1; i <= 3; i++){\n           if (a[i] < b[i]) {\n               res =-1;\n               break;\n           } else if (a[i] > b[i]) {\n               res = 1;\n               break;\n           } else if (a[i] == b[i]) {\n               if (i == 3) {\n               res = 0;\n               break;\n               } else {\n               continue;\n               }\n           }\n       }\n       print res;\n     }'\n}\n\nMYSQL_EXPORTER_VER=`/bin/mysqld_exporter --version 2>&1 | grep \"mysqld_exporter\" | awk '{print $3}'`\n\n#in versions greater than 0.10.0 different configuration flags are used:\n#https://github.com/prometheus/mysqld_exporter/commit/66c41ac7eb90a74518a6ecf6c6bb06464eb68db8\ncompverResult=`compareVersions \"${MYSQL_EXPORTER_VER}\" \"0.10.0\"`\nCONFIG_FLAG_PREFIX='-'\nif [ ${compverResult} -gt 0 ]; then\n    CONFIG_FLAG_PREFIX='--'\nfi\n\nexec /bin/mysqld_exporter \\\n  ${CONFIG_FLAG_PREFIX}config.my-cnf=/etc/mysql/mysql_user.cnf \\\n  ${CONFIG_FLAG_PREFIX}web.listen-address=\"${POD_IP}:${LISTEN_PORT}\" \\\n  ${CONFIG_FLAG_PREFIX}web.telemetry-path=\"$TELEMETRY_PATH\"\n"
  },
  {
    "path": "mariadb/templates/bin/_restore_mariadb.sh.tpl",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n{{- $envAll := . }}\n\n# Capture the user's command line arguments\nARGS=(\"$@\")\n\nif [[ -s /tmp/restore_main.sh ]]; then\n  source /tmp/restore_main.sh\nelse\n  echo \"File /tmp/restore_main.sh does not exist.\"\n  exit 1\nfi\n\n# Export the variables needed by the framework\nexport DB_NAME=\"mariadb\"\nexport DB_NAMESPACE=${MARIADB_POD_NAMESPACE}\nexport ARCHIVE_DIR=${MARIADB_BACKUP_BASE_DIR}/db/${DB_NAMESPACE}/${DB_NAME}/archive\n\nRESTORE_USER='restoreuser'\nRESTORE_PW=$(pwgen 16 1)\nRESTORE_LOG='/tmp/restore_error.log'\nrm -f $RESTORE_LOG\n\n# This is for commands which require admin access\nMYSQL=\"mariadb \\\n       --defaults-file=/etc/mysql/admin_user.cnf \\\n       --connect-timeout 10\"\n\n# This is for commands which we want the temporary \"restore\" user\n# to execute\nRESTORE_CMD=\"mariadb \\\n             --user=${RESTORE_USER} \\\n             --password=${RESTORE_PW} \\\n             --host={{ tuple \"oslo_db\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }} \\\n             --port={{ tuple \"oslo_db\" \"direct\" \"mysql\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }} \\\n{{- if .Values.manifests.certificates }}\n             --ssl-ca=/etc/mysql/certs/ca.crt \\\n             --ssl-key=/etc/mysql/certs/tls.key \\\n             --ssl-cert=/etc/mysql/certs/tls.crt \\\n{{- end }}\n             --connect-timeout 10\"\n\n# Get a single database data from the SQL file.\n# $1 - database name\n# $2 - sql file path\ncurrent_db_desc() {\n  PATTERN=\"-- Current Database:\"\n  sed -n \"/${PATTERN} \\`$1\\`/,/${PATTERN}/p\" $2\n}\n\n#Return all database from an archive\nget_databases() {\n  TMP_DIR=$1\n  DB_FILE=$2\n\n  if [[ -e ${TMP_DIR}/db.list ]]\n  then\n    DBS=$(cat ${TMP_DIR}/db.list | \\\n              grep -ivE 'information_schema|performance_schema|mysql|sys' )\n  else\n    DBS=\" \"\n  fi\n\n  echo $DBS > $DB_FILE\n}\n\n# Determine sql file from 2 options - current and legacy one\n# if current is not found check that there is no other namespaced dump file\n# before falling back to legacy one\n_get_sql_file() {\n  TMP_DIR=$1\n  SQL_FILE=\"${TMP_DIR}/mariadb.${MARIADB_POD_NAMESPACE}.*.sql\"\n  LEGACY_SQL_FILE=\"${TMP_DIR}/mariadb.*.sql\"\n  INVALID_SQL_FILE=\"${TMP_DIR}/mariadb.*.*.sql\"\n  if [ -f ${SQL_FILE} ]\n  then\n    echo \"Found $(ls ${SQL_FILE})\" > /dev/stderr\n    printf ${SQL_FILE}\n  elif [ -f ${INVALID_SQL_FILE} ]\n  then\n    echo \"Expected to find ${SQL_FILE} or ${LEGACY_SQL_FILE}, but found $(ls ${INVALID_SQL_FILE})\" > /dev/stderr\n  elif [ -f ${LEGACY_SQL_FILE} ]\n  then\n    echo \"Falling back to legacy naming ${LEGACY_SQL_FILE}. Found $(ls ${LEGACY_SQL_FILE})\" > /dev/stderr\n    printf ${LEGACY_SQL_FILE}\n  fi\n}\n\n# Extract all tables of a database from an archive and put them in the requested\n# file.\nget_tables() {\n  DATABASE=$1\n  TMP_DIR=$2\n  TABLE_FILE=$3\n\n  SQL_FILE=$(_get_sql_file $TMP_DIR)\n  if [ ! 
-z $SQL_FILE ]; then\n    current_db_desc ${DATABASE} ${SQL_FILE} \\\n        | grep \"^CREATE TABLE\" | awk -F '`' '{print $2}' \\\n        > $TABLE_FILE\n  else\n    # Error, cannot report the tables\n    echo \"No SQL file found - cannot extract the tables\"\n    return 1\n  fi\n}\n\n# Extract all rows in the given table of a database from an archive and put\n# them in the requested file.\nget_rows() {\n  DATABASE=$1\n  TABLE=$2\n  TMP_DIR=$3\n  ROW_FILE=$4\n\n  SQL_FILE=$(_get_sql_file $TMP_DIR)\n  if [ ! -z $SQL_FILE ]; then\n    current_db_desc ${DATABASE} ${SQL_FILE} \\\n        | grep \"INSERT INTO \\`${TABLE}\\` VALUES\" > $ROW_FILE\n    return 0\n  else\n    # Error, cannot report the rows\n    echo \"No SQL file found - cannot extract the rows\"\n    return 1\n  fi\n}\n\n# Extract the schema for the given table in the given database belonging to\n# the archive file found in the TMP_DIR.\nget_schema() {\n  DATABASE=$1\n  TABLE=$2\n  TMP_DIR=$3\n  SCHEMA_FILE=$4\n\n  SQL_FILE=$(_get_sql_file $TMP_DIR)\n  if [ ! -z $SQL_FILE ]; then\n    DB_FILE=$(mktemp -p /tmp)\n    current_db_desc ${DATABASE} ${SQL_FILE} > ${DB_FILE}\n    sed -n /'CREATE TABLE `'$TABLE'`'/,/'--'/p ${DB_FILE} > ${SCHEMA_FILE}\n    if [[ ! 
(-s ${SCHEMA_FILE}) ]]; then\n      sed -n /'CREATE TABLE IF NOT EXISTS `'$TABLE'`'/,/'--'/p ${DB_FILE} \\\n          > ${SCHEMA_FILE}\n    fi\n    rm -f ${DB_FILE}\n  else\n    # Error, cannot report the rows\n    echo \"No SQL file found - cannot extract the schema\"\n    return 1\n  fi\n}\n\n# Create temporary user for restoring specific databases.\ncreate_restore_user() {\n  restore_db=$1\n\n  # Ensure any old restore user is removed first, if it exists.\n  # If it doesn't exist it may return error, so do not exit the\n  # script if that's the case.\n  delete_restore_user \"dont_exit_on_error\"\n\n  $MYSQL --execute=\"GRANT SELECT ON *.* TO ${RESTORE_USER}@'%' IDENTIFIED BY '${RESTORE_PW}';\" 2>>$RESTORE_LOG\n  if [[ \"$?\" -eq 0 ]]\n  then\n    $MYSQL --execute=\"GRANT ALL ON ${restore_db}.* TO ${RESTORE_USER}@'%' IDENTIFIED BY '${RESTORE_PW}';\" 2>>$RESTORE_LOG\n    if [[ \"$?\" -ne 0 ]]\n    then\n      cat $RESTORE_LOG\n      echo \"Failed to grant restore user ALL permissions on database ${restore_db}\"\n      return 1\n    fi\n  else\n    cat $RESTORE_LOG\n    echo \"Failed to grant restore user select permissions on all databases\"\n    return 1\n  fi\n}\n\n# Delete temporary restore user\ndelete_restore_user() {\n  error_handling=$1\n\n  $MYSQL --execute=\"DROP USER ${RESTORE_USER}@'%';\" 2>>$RESTORE_LOG\n  if [[ \"$?\" -ne 0 ]]\n  then\n    if [ \"$error_handling\" == \"exit_on_error\" ]\n    then\n      cat $RESTORE_LOG\n      echo \"Failed to delete temporary restore user - needs attention to avoid a security hole\"\n      return 1\n    fi\n  fi\n}\n\n#Restore a single database\nrestore_single_db() {\n  SINGLE_DB_NAME=$1\n  TMP_DIR=$2\n\n  if [[ -z \"$SINGLE_DB_NAME\" ]]\n  then\n    echo \"Restore single DB called but with wrong parameter.\"\n    return 1\n  fi\n\n  SQL_FILE=$(_get_sql_file $TMP_DIR)\n  if [ ! 
-z $SQL_FILE ]; then\n    # Restoring a single database requires us to create a temporary user\n    # which has capability to only restore that ONE database. One gotcha\n    # is that the mysql command to restore the database is going to throw\n    # errors because of all the other databases that it cannot access. So\n    # because of this reason, the --force option is used to prevent the\n    # command from stopping on an error.\n    create_restore_user $SINGLE_DB_NAME\n    if [[ $? -ne 0 ]]\n    then\n      echo \"Restore $SINGLE_DB_NAME failed create restore user.\"\n      return 1\n    fi\n    $RESTORE_CMD --force < $SQL_FILE 2>>$RESTORE_LOG\n    if [[ \"$?\" -eq 0 ]]\n    then\n      echo \"Database $SINGLE_DB_NAME Restore successful.\"\n    else\n      cat $RESTORE_LOG\n      delete_restore_user \"exit_on_error\"\n      echo \"Database $SINGLE_DB_NAME Restore failed.\"\n      return 1\n    fi\n    delete_restore_user \"exit_on_error\"\n    if [[ $? -ne 0 ]]\n    then\n      echo \"Restore $SINGLE_DB_NAME failed delete restore user.\"\n      return 1\n    fi\n    if [ -f ${TMP_DIR}/${SINGLE_DB_NAME}_grant.sql ]\n    then\n      $MYSQL < ${TMP_DIR}/${SINGLE_DB_NAME}_grant.sql 2>>$RESTORE_LOG\n      if [[ \"$?\" -eq 0 ]]\n      then\n        if ! $MYSQL --execute=\"FLUSH PRIVILEGES;\"; then\n          echo \"Failed to flush privileges for $SINGLE_DB_NAME.\"\n          return 1\n        fi\n        echo \"Database $SINGLE_DB_NAME Permission Restore successful.\"\n      else\n        cat $RESTORE_LOG\n        echo \"Database $SINGLE_DB_NAME Permission Restore failed.\"\n        return 1\n      fi\n    else\n      echo \"There is no permission file available for $SINGLE_DB_NAME\"\n      return 1\n    fi\n  else\n    echo \"There is no database file available to restore from\"\n    return 1\n  fi\n  return 0\n}\n\n#Restore all the databases\nrestore_all_dbs() {\n  TMP_DIR=$1\n\n  SQL_FILE=$(_get_sql_file $TMP_DIR)\n  if [ ! 
-z $SQL_FILE ]; then\n    # Check the scope of the archive.\n    SCOPE=$(echo ${SQL_FILE} | awk -F'.' '{print $(NF-1)}')\n    if [[ \"${SCOPE}\" != \"all\" ]]; then\n      # This is just a single database backup. The user should\n      # instead use the single database restore option.\n      echo \"Cannot use the restore all option for an archive containing only a single database.\"\n      echo \"Please use the single database restore option.\"\n      return 1\n    fi\n\n    $MYSQL < $SQL_FILE 2>$RESTORE_LOG\n    if [[ \"$?\" -eq 0 ]]\n    then\n      echo \"Databases $( echo $DBS | tr -d '\\n') Restore successful.\"\n    else\n      cat $RESTORE_LOG\n      echo \"Databases $( echo $DBS | tr -d '\\n') Restore failed.\"\n      return 1\n    fi\n    if [[ -f ${TMP_DIR}/grants.sql ]]\n    then\n      $MYSQL < ${TMP_DIR}/grants.sql 2>$RESTORE_LOG\n      if [[ \"$?\" -eq 0 ]]\n      then\n        if ! $MYSQL --execute=\"FLUSH PRIVILEGES;\"; then\n          echo \"Failed to flush privileges.\"\n          return 1\n        fi\n        echo \"Databases Permission Restore successful.\"\n      else\n        cat $RESTORE_LOG\n        echo \"Databases Permission Restore failed.\"\n        return 1\n      fi\n    else\n      echo \"There is no permission file available\"\n      return 1\n    fi\n  else\n    echo \"There is no database file available to restore from\"\n    return 1\n  fi\n  return 0\n}\n\n# Call the CLI interpreter, providing the archive directory path and the\n# user arguments passed in\ncli_main ${ARGS[@]}\n"
  },
  {
    "path": "mariadb/templates/bin/_start.py.tpl",
    "content": "#!/usr/bin/python3\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nimport errno\nimport logging\nimport os\nimport secrets\nimport select\nimport signal\nimport subprocess  # nosec\nimport socket\nimport sys\nimport tempfile\nimport time\nimport threading\nfrom datetime import datetime, timedelta\n\nimport configparser\nimport iso8601\nimport kubernetes.client\nimport kubernetes.config\n\n# Create logger, console handler and formatter\nlogger = logging.getLogger('OpenStack-Helm Mariadb')\nlogger.setLevel(logging.INFO)\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\nformatter = logging.Formatter(\n    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n# Set the formatter and add the handler\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n# Get the local hostname\nlocal_hostname = socket.gethostname()\nlogger.info(\"This instance hostname: {0}\".format(local_hostname))\n\n# Get local node IP address\nlocal_ip = socket.gethostbyname(local_hostname)\nlogger.info(\"This instance IP address: {0}\".format(local_ip))\n\n# Get the instance number\ninstance_number = local_hostname.split(\"-\")[-1]\nlogger.info(\"This instance number: {0}\".format(instance_number))\n\n# Setup k8s client credentials and check api version\nkubernetes.config.load_incluster_config()\nkubernetes_version = kubernetes.client.VersionApi().get_code().git_version\nlogger.info(\"Kubernetes API Version: {0}\".format(kubernetes_version))\nk8s_api_instance = 
kubernetes.client.CoreV1Api()\n\n# Setup secrets generator\nsecretsGen = secrets.SystemRandom()\n\ndef check_env_var(env_var):\n    \"\"\"Check if an env var exists.\n\n    Keyword arguments:\n    env_var -- the env var to check for the existance of\n    \"\"\"\n    if env_var in os.environ:\n        return True\n    else:\n        logger.critical(\"environment variable \\\"{0}\\\" not set\".format(env_var))\n        sys.exit(1)\n\n\n# Set some variables from env vars injected into the container\nif check_env_var(\"STATE_CONFIGMAP\"):\n    state_configmap_name = os.environ['STATE_CONFIGMAP']\n    logger.info(\"Will use \\\"{0}\\\" configmap for cluster state info\".format(\n        state_configmap_name))\nif check_env_var(\"POD_NAMESPACE\"):\n    pod_namespace = os.environ['POD_NAMESPACE']\nif check_env_var(\"DIRECT_SVC_NAME\"):\n    direct_svc_name = os.environ['DIRECT_SVC_NAME']\nif check_env_var(\"MARIADB_REPLICAS\"):\n    mariadb_replicas = os.environ['MARIADB_REPLICAS']\nif check_env_var(\"POD_NAME_PREFIX\"):\n    pod_name_prefix = os.environ['POD_NAME_PREFIX']\nif check_env_var(\"DISCOVERY_DOMAIN\"):\n    discovery_domain = os.environ['DISCOVERY_DOMAIN']\nif check_env_var(\"WSREP_PORT\"):\n    wsrep_port = os.environ['WSREP_PORT']\nif check_env_var(\"MYSQL_DBADMIN_USERNAME\"):\n    mysql_dbadmin_username = os.environ['MYSQL_DBADMIN_USERNAME']\nif check_env_var(\"MYSQL_DBADMIN_PASSWORD\"):\n    mysql_dbadmin_password = os.environ['MYSQL_DBADMIN_PASSWORD']\nif check_env_var(\"MYSQL_DBSST_USERNAME\"):\n    mysql_dbsst_username = os.environ['MYSQL_DBSST_USERNAME']\nif check_env_var(\"MYSQL_DBSST_PASSWORD\"):\n    mysql_dbsst_password = os.environ['MYSQL_DBSST_PASSWORD']\nif check_env_var(\"MYSQL_DBAUDIT_USERNAME\"):\n    mysql_dbaudit_username = os.environ['MYSQL_DBAUDIT_USERNAME']\nelse:\n    mysql_dbaudit_username = ''\nif check_env_var(\"MYSQL_DBAUDIT_PASSWORD\"):\n    mysql_dbaudit_password = os.environ['MYSQL_DBAUDIT_PASSWORD']\n\nmysql_x509 = 
os.getenv('MARIADB_X509', \"\")\nMYSQL_SSL_CMD_OPTS=[\"--ssl-verify-server-cert=false\",\n                    \"--ssl-ca=/etc/mysql/certs/ca.crt\",\n                    \"--ssl-key=/etc/mysql/certs/tls.key\",\n                    \"--ssl-cert=/etc/mysql/certs/tls.crt\"]\n\nif mysql_dbadmin_username == mysql_dbsst_username:\n    logger.critical(\n        \"The dbadmin username should not match the sst user username\")\n    sys.exit(1)\n\n# Set some variables for tuneables\ncluster_leader_ttl = int(os.environ['CLUSTER_LEADER_TTL'])\nstate_configmap_update_period = 10\ndefault_sleep = 20\n\n# set one name for all commands, avoid \"magic names\"\nMYSQL_BINARY_NAME='mariadbd'\n\n\ndef ensure_state_configmap(pod_namespace, configmap_name, configmap_body):\n    \"\"\"Ensure the state configmap exists.\n\n    Keyword arguments:\n    pod_namespace -- the namespace to house the configmap\n    configmap_name -- the configmap name\n    configmap_body -- the configmap body\n    \"\"\"\n    try:\n        k8s_api_instance.read_namespaced_config_map(\n            name=configmap_name, namespace=pod_namespace)\n        return True\n    except:\n        k8s_api_instance.create_namespaced_config_map(\n            namespace=pod_namespace, body=configmap_body)\n\n        return False\n\n\ndef run_cmd_with_logging(popenargs,\n                         logger,\n                         stdout_log_level=logging.INFO,\n                         stderr_log_level=logging.INFO,\n                         **kwargs):\n    \"\"\"Run subprocesses and stream output to logger.\"\"\"\n    child = subprocess.Popen(  # nosec\n        popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n    log_level = {\n        child.stdout: stdout_log_level,\n        child.stderr: stderr_log_level\n    }\n\n    def check_io():\n        ready_to_read = select.select([child.stdout, child.stderr], [], [],\n                                      1000)[0]\n        for io in ready_to_read:\n            line = 
io.readline().decode()\n            logger.log(log_level[io], line[:-1])\n\n    while child.poll(\n    ) is None:  # keep checking stdout/stderr until the child exits\n        check_io()\n    check_io()  # check again to catch anything after the process exits\n    return child.wait()\n\n\ndef wait_mysql_status(delay=30):\n    logger.info(\"Start checking mariadb status\")\n    i = 0\n    res = 1\n    while True:\n        logger.info(\"Checking mysql status {0}\".format(i))\n        cmd = ['mariadb',\n            \"--defaults-file=/etc/mysql/admin_user.cnf\",\n            \"--host=localhost\"]\n        if mysql_x509:\n          cmd.extend(MYSQL_SSL_CMD_OPTS)\n        cmd.extend([\"--execute=status\"])\n        res = run_cmd_with_logging(cmd, logger)\n        if res == 0:\n            logger.info(\"mariadb status check passed\")\n            break\n        else:\n            logger.info(\"mariadb status check failed\")\n        i += 1\n        time.sleep(delay)\n\n\ndef stop_mysqld():\n    \"\"\"Stop mysqld, assuming pid file in default location.\"\"\"\n    logger.info(\"Shutting down any mysqld instance if required\")\n    mysqld_pidfile_path = \"/var/lib/mysql/{0}.pid\".format(local_hostname)\n\n    def is_pid_running(pid):\n        if os.path.isdir('/proc/{0}'.format(pid)):\n            return True\n        return False\n\n    def is_pid_mysqld(pid):\n        with open('/proc/{0}/comm'.format(pid), \"r\") as mysqld_pidfile:\n            comm = mysqld_pidfile.readlines()[0].rstrip('\\n')\n        if comm.startswith(MYSQL_BINARY_NAME):\n            return True\n        else:\n            return False\n\n    if not os.path.isfile(mysqld_pidfile_path):\n        logger.debug(\"No previous pid file found for mysqld\")\n        return\n\n    if os.stat(mysqld_pidfile_path).st_size == 0:\n        logger.info(\n            \"{0} file is empty, removing it\".format(mysqld_pidfile_path))\n        os.remove(mysqld_pidfile_path)\n        return\n\n    logger.info(\n        
\"Previous pid file found for mysqld, attempting to shut it down\")\n\n    with open(mysqld_pidfile_path, \"r\") as mysqld_pidfile:\n        mysqld_pid = int(mysqld_pidfile.readlines()[0].rstrip('\\n'))\n\n    if not is_pid_running(mysqld_pid):\n        logger.info(\n            \"Mysqld was not running with pid {0}, going to remove stale \"\n            \"file\".format(mysqld_pid))\n        os.remove(mysqld_pidfile_path)\n        return\n    if not is_pid_mysqld(mysqld_pid):\n        logger.error(\n            \"pidfile process is not mysqld, removing pidfile and panic\")\n        os.remove(mysqld_pidfile_path)\n        sys.exit(1)\n\n    logger.info(\"pid from pidfile is mysqld\")\n    os.kill(mysqld_pid, 15)\n    try:\n        pid, status = os.waitpid(mysqld_pid, 0)\n    except OSError as err:\n        # The process has already exited\n        if err.errno == errno.ECHILD:\n            return\n        else:\n            raise\n    logger.info(\"Mysqld stopped: pid = {0}, \"\n                \"exit status = {1}\".format(pid, status))\n\ndef mysqld_write_cluster_conf(mode='run'):\n    \"\"\"Write out dynamic cluster config.\n\n    Keyword arguments:\n    mode -- whether we are writing the cluster config for the cluster to 'run'\n            or 'bootstrap' (default 'run')\n    \"\"\"\n    logger.info(\"Setting up cluster config\")\n    cluster_config = configparser.ConfigParser()\n    cluster_config['mysqld'] = {}\n    cluster_config_params = cluster_config['mysqld']\n    wsrep_cluster_members = []\n    for node in range(int(mariadb_replicas)):\n        node_hostname = \"{0}-{1}\".format(pod_name_prefix, node)\n        if local_hostname == node_hostname:\n            cluster_config_params['wsrep_node_address'] = local_ip\n            wsrep_node_name = \"{0}.{1}\".format(node_hostname, discovery_domain)\n            cluster_config_params['wsrep_node_name'] = wsrep_node_name\n\n    if mode == 'run':\n        cluster_config_params['wsrep_cluster_address'] = 
\"gcomm://{0}:{1}\".format(\n            discovery_domain, wsrep_port)\n\n    else:\n        cluster_config_params['wsrep_cluster_address'] = \"gcomm://\"\n    cluster_config_file = '/etc/mysql/conf.d/10-cluster-config.cnf'\n    logger.info(\n        \"Writing out cluster config to: {0}\".format(cluster_config_file))\n    with open(cluster_config_file, 'w') as configfile:\n        cluster_config.write(configfile)\n\n\n# Function to setup mysqld\ndef mysqld_bootstrap():\n    \"\"\"Bootstrap the db if no data found in the 'bootstrap_test_dir'\"\"\"\n    logger.info(\"Boostrapping Mariadb\")\n    mysql_data_dir = '/var/lib/mysql'\n    bootstrap_test_dir = \"{0}/mysql\".format(mysql_data_dir)\n    if not os.path.isdir(bootstrap_test_dir):\n        stop_mysqld()\n        mysqld_write_cluster_conf(mode='bootstrap')\n        run_cmd_with_logging([\n            'mariadb-install-db', '--user=mysql',\n            \"--datadir={0}\".format(mysql_data_dir)\n        ], logger)\n        if not mysql_dbaudit_username:\n            template = (\n                # NOTE: since mariadb 10.4.13 definer of view\n                # mysql.user is not root but mariadb.sys user\n                # it is safe not to remove it because the account by default\n                # is locked and cannot login\n                \"DELETE FROM mysql.user WHERE user != 'mariadb.sys' ;\\n\"  # nosec\n                \"CREATE OR REPLACE USER '{0}'@'%' IDENTIFIED BY \\'{1}\\' ;\\n\"\n                \"GRANT ALL ON *.* TO '{0}'@'%' {4} WITH GRANT OPTION; \\n\"\n                \"DROP DATABASE IF EXISTS test ;\\n\"\n                \"CREATE OR REPLACE USER '{2}'@'127.0.0.1' IDENTIFIED BY '{3}';\\n\"\n                \"GRANT PROCESS, RELOAD, LOCK TABLES, REPLICATION CLIENT ON *.* TO '{2}'@'127.0.0.1';\\n\"\n                \"FLUSH PRIVILEGES ;\\n\"\n                \"SHUTDOWN ;\".format(mysql_dbadmin_username, mysql_dbadmin_password,\n                                    mysql_dbsst_username, 
mysql_dbsst_password,\n                                    mysql_x509))\n        else:\n            template = (\n                \"DELETE FROM mysql.user WHERE user != 'mariadb.sys' ;\\n\"  # nosec\n                \"CREATE OR REPLACE USER '{0}'@'%' IDENTIFIED BY \\'{1}\\' ;\\n\"\n                \"GRANT ALL ON *.* TO '{0}'@'%' {6} WITH GRANT OPTION;\\n\"\n                \"DROP DATABASE IF EXISTS test ;\\n\"\n                \"CREATE OR REPLACE USER '{2}'@'127.0.0.1' IDENTIFIED BY '{3}';\\n\"\n                \"GRANT PROCESS, RELOAD, LOCK TABLES, REPLICATION CLIENT ON *.* TO '{2}'@'127.0.0.1' ;\\n\"\n                \"CREATE OR REPLACE USER '{4}'@'%' IDENTIFIED BY '{5}';\\n\"\n                \"GRANT SELECT ON *.* TO '{4}'@'%' {6};\\n\"\n                \"FLUSH PRIVILEGES ;\\n\"\n                \"SHUTDOWN ;\".format(mysql_dbadmin_username, mysql_dbadmin_password,\n                                    mysql_dbsst_username, mysql_dbsst_password,\n                                    mysql_dbaudit_username, mysql_dbaudit_password,\n                                    mysql_x509))\n        bootstrap_sql_file = tempfile.NamedTemporaryFile(suffix='.sql').name\n        with open(bootstrap_sql_file, 'w') as f:\n            f.write(template)\n            f.close()\n        run_cmd_with_logging([\n            MYSQL_BINARY_NAME, '--user=mysql', '--bind-address=127.0.0.1',\n            '--wsrep_cluster_address=gcomm://',\n            \"--init-file={0}\".format(bootstrap_sql_file)\n        ], logger)\n        os.remove(bootstrap_sql_file)\n    else:\n        logger.info(\"Skipping bootstrap as {0} directory is present\".format(\n            bootstrap_test_dir))\n\n\ndef safe_update_configmap(configmap_dict, configmap_patch):\n    \"\"\"Update a configmap with locking.\n\n    Keyword arguments:\n    configmap_dict -- a dict representing the configmap to be patched\n    configmap_patch -- a dict containign the patch\n    \"\"\"\n    logger.debug(\"Safe Patching configmap\")\n    
# NOTE(portdirect): Explictly set the resource version we are patching to\n    # ensure nothing else has modified the confimap since we read it.\n    configmap_patch['metadata']['resourceVersion'] = configmap_dict[\n        'metadata']['resource_version']\n    try:\n        api_response = k8s_api_instance.patch_namespaced_config_map(\n            name=state_configmap_name,\n            namespace=pod_namespace,\n            body=configmap_patch)\n        return True\n    except kubernetes.client.rest.ApiException as error:\n        if error.status == 409:\n            # This status code indicates a collision trying to write to the\n            # config map while another instance is also trying the same.\n            logger.warning(\"Collision writing configmap: {0}\".format(error))\n            # This often happens when the replicas were started at the same\n            # time, and tends to be persistent. Sleep with some random\n            # jitter value briefly to break the synchronization.\n            naptime = secretsGen.uniform(0.8,1.2)\n            time.sleep(naptime)\n        else:\n            logger.error(\"Failed to set configmap: {0}\".format(error))\n            return error\n\n\ndef set_configmap_annotation(key, value):\n    \"\"\"Update a configmap's annotations via patching.\n\n    Keyword arguments:\n    key -- the key to be patched\n    value -- the value to give the key\n    \"\"\"\n    logger.debug(\"Setting configmap annotation key={0} value={1}\".format(\n        key, value))\n    configmap_dict = k8s_api_instance.read_namespaced_config_map(\n        name=state_configmap_name, namespace=pod_namespace).to_dict()\n    configmap_patch = {'metadata': {'annotations': {}}}\n    configmap_patch['metadata']['annotations'][key] = value\n    return safe_update_configmap(\n        configmap_dict=configmap_dict, configmap_patch=configmap_patch)\n\n\ndef set_configmap_data(key, value):\n    \"\"\"Update a configmap's data via patching.\n\n    Keyword 
arguments:\n    key -- the key to be patched\n    value -- the value to give the key\n    \"\"\"\n    logger.debug(\"Setting configmap data key={0} value={1}\".format(key, value))\n    configmap_dict = k8s_api_instance.read_namespaced_config_map(\n        name=state_configmap_name, namespace=pod_namespace).to_dict()\n    configmap_patch = {'data': {}, 'metadata': {}}\n    configmap_patch['data'][key] = value\n    return safe_update_configmap(\n        configmap_dict=configmap_dict, configmap_patch=configmap_patch)\n\n\ndef get_configmap_value(key, type='data'):\n    \"\"\"Get a configmap's key's value.\n\n    Keyword arguments:\n    key -- the key to retrive the data from\n    type -- the type of data to retrive from the configmap, can either be 'data'\n            or an 'annotation'. (default data)\n    \"\"\"\n    state_configmap = k8s_api_instance.read_namespaced_config_map(\n        name=state_configmap_name, namespace=pod_namespace)\n    state_configmap_dict = state_configmap.to_dict()\n    if type == 'data':\n        state_configmap_data = state_configmap_dict['data']\n    elif type == 'annotation':\n        state_configmap_data = state_configmap_dict['metadata']['annotations']\n    else:\n        logger.error(\n            \"Unknown data type \\\"{0}\\\" reqested for retrival\".format(type))\n        return False\n    if state_configmap_data and key in state_configmap_data:\n        return state_configmap_data[key]\n    else:\n        return None\n\n\ndef get_cluster_state():\n    \"\"\"Get the current cluster state from a configmap, creating the configmap\n    if it does not already exist.\n    \"\"\"\n    logger.info(\"Getting cluster state\")\n    state = None\n    while state is None:\n        try:\n            state = get_configmap_value(\n                type='annotation',\n                key='openstackhelm.openstack.org/cluster.state')\n            logger.info(\n                \"The cluster is currently in \\\"{0}\\\" state.\".format(state))\n       
 except:\n            logger.info(\"The cluster configmap \\\"{0}\\\" does not exist.\".format(\n                state_configmap_name))\n            time.sleep(default_sleep)\n            leader_expiry_raw = datetime.utcnow() + timedelta(\n                seconds=cluster_leader_ttl)\n            leader_expiry = \"{0}Z\".format(leader_expiry_raw.isoformat(\"T\"))\n            if check_for_active_nodes():\n                # NOTE(portdirect): here we make the assumption that the 1st pod\n                # in an existing statefulset is the one to adopt as leader.\n                leader = \"{0}-0\".format(\"-\".join(\n                    local_hostname.split(\"-\")[:-1]))\n                state = \"live\"\n                logger.info(\n                    \"The cluster is running already though unmanaged \\\"{0}\\\" will be declared leader in a \\\"{1}\\\" state\".\n                    format(leader, state))\n            else:\n                leader = local_hostname\n                state = \"new\"\n                logger.info(\n                    \"The cluster is new \\\"{0}\\\" will be declared leader in a \\\"{1}\\\" state\".\n                    format(leader, state))\n\n            initial_configmap_body = {\n                \"apiVersion\": \"v1\",\n                \"kind\": \"ConfigMap\",\n                \"metadata\": {\n                    \"name\": state_configmap_name,\n                    \"annotations\": {\n                        \"openstackhelm.openstack.org/cluster.state\": state,\n                        \"openstackhelm.openstack.org/leader.node\": leader,\n                        \"openstackhelm.openstack.org/leader.expiry\":\n                        leader_expiry,\n                        \"openstackhelm.openstack.org/reboot.node\": \"\"\n                    }\n                },\n                \"data\": {}\n            }\n            ensure_state_configmap(\n                pod_namespace=pod_namespace,\n                
configmap_name=state_configmap_name,\n                configmap_body=initial_configmap_body)\n    return state\n\n\ndef declare_myself_cluster_leader():\n    \"\"\"Declare the current pod as the cluster leader.\"\"\"\n    logger.info(\"Declaring myself current cluster leader\")\n    leader_expiry_raw = datetime.utcnow() + timedelta(\n        seconds=cluster_leader_ttl)\n    leader_expiry = \"{0}Z\".format(leader_expiry_raw.isoformat(\"T\"))\n    set_configmap_annotation(\n        key='openstackhelm.openstack.org/leader.node', value=local_hostname)\n    set_configmap_annotation(\n        key='openstackhelm.openstack.org/leader.expiry', value=leader_expiry)\n\n\ndef deadmans_leader_election():\n    \"\"\"Run a simplisic deadmans leader election.\"\"\"\n    leader_node = get_configmap_value(\n        type='annotation', key='openstackhelm.openstack.org/leader.node')\n    leader_expiry = get_configmap_value(\n        type='annotation', key='openstackhelm.openstack.org/leader.expiry')\n    if iso8601.parse_date(leader_expiry).replace(\n            tzinfo=None) < datetime.utcnow().replace(tzinfo=None):\n        logger.info(\"Current cluster leader has expired\")\n        declare_myself_cluster_leader()\n    elif local_hostname == leader_node:\n        logger.info(\"Renewing cluster leader lease\")\n        declare_myself_cluster_leader()\n\n\ndef get_grastate_val(key):\n    \"\"\"Extract data from grastate.dat.\n\n    Keyword arguments:\n    key -- the key to extract the value of\n    \"\"\"\n    logger.debug(\"Reading grastate.dat key={0}\".format(key))\n    try:\n        # This attempts to address a potential race condition with the initial\n        # creation of the grastate.date file where the file would exist\n        # however, it is not immediately populated. Testing indicated it could\n        # take 15-20 seconds for the file to be populated. So loop and keep\n        # checking up to 60 seconds. 
If it still isn't populated afterwards,\n        # the IndexError will still occur as we are seeing now without the loop.\n        time_end = time.time() + 60\n        while time.time() < time_end:\n            with open(\"/var/lib/mysql/grastate.dat\", \"r\") as myfile:\n                grastate_raw = [s.strip() for s in myfile.readlines()]\n            if grastate_raw:\n                break\n            time.sleep(1)\n        return [i for i in grastate_raw\n                if i.startswith(\"{0}:\".format(key))][0].split(':')[1].strip()\n    except IndexError:\n        logger.error(\n            \"IndexError: Unable to find %s with ':' in grastate.dat\", key)\n        raise\n\n\ndef set_grastate_val(key, value):\n    \"\"\"Set values in grastate.dat.\n\n    Keyword arguments:\n    key -- the key to set the value of\n    value -- the value to set the key to\n    \"\"\"\n    logger.debug(\"Updating grastate.dat key={0} value={1}\".format(key, value))\n    with open(\"/var/lib/mysql/grastate.dat\", \"r\") as sources:\n        lines = sources.readlines()\n        for line_num, line_content in enumerate(lines):\n            if line_content.startswith(\"{0}:\".format(key)):\n                line_content = \"{0}: {1}\\n\".format(key, value)\n            lines[line_num] = line_content\n    with open(\"/var/lib/mysql/grastate.dat\", \"w\") as sources:\n        for line in lines:\n            sources.write(line)\n\n\ndef update_grastate_configmap():\n    \"\"\"Update state configmap with grastate.dat info.\"\"\"\n    while not os.path.exists('/var/lib/mysql/grastate.dat'):\n        time.sleep(1)\n    logger.info(\"Updating grastate configmap\")\n    grastate = dict()\n    grastate['version'] = get_grastate_val(key='version')\n    grastate['uuid'] = get_grastate_val(key='uuid')\n    grastate['seqno'] = get_grastate_val(key='seqno')\n    grastate['safe_to_bootstrap'] = get_grastate_val(key='safe_to_bootstrap')\n    grastate['sample_time'] = 
\"{0}Z\".format(datetime.utcnow().isoformat(\"T\"))\n    for grastate_key, grastate_value in list(grastate.items()):\n        configmap_key = \"{0}.{1}\".format(grastate_key, local_hostname)\n        if get_configmap_value(type='data', key=configmap_key) != grastate_value:\n            set_configmap_data(key=configmap_key, value=grastate_value)\n\n\ndef update_grastate_on_restart():\n    \"\"\"Update the grastate.dat on node restart.\"\"\"\n    logger.info(\"Updating grastate info for node\")\n    if os.path.exists('/var/lib/mysql/grastate.dat'):\n        if get_grastate_val(key='seqno') == '-1':\n            logger.info(\n                \"Node shutdown was not clean, getting position via wsrep-recover\"\n            )\n\n            def recover_wsrep_position():\n                \"\"\"Extract recovered wsrep position from uncleanly exited node.\"\"\"\n                wsrep_recover = subprocess.Popen(  # nosec\n                    [\n                        MYSQL_BINARY_NAME, '--bind-address=127.0.0.1',\n                        '--wsrep_cluster_address=gcomm://', '--wsrep-recover'\n                    ],\n                    stdout=subprocess.PIPE,\n                    stderr=subprocess.PIPE,\n                    encoding=\"utf-8\")\n                out, err = wsrep_recover.communicate()\n                wsrep_rec_pos = None\n                # NOTE: communicate() returns a tuple (stdout_data, stderr_data).\n                # The data will be strings if streams were opened in text mode;\n                # otherwise, bytes. 
If it is bytes, we should decode and get a\n                # str for the err.split() to not error below.\n                if isinstance(err, bytes):\n                    err = err.decode('utf-8')\n                for item in err.split(\"\\n\"):\n                    logger.info(\"Recovering wsrep position: {0}\".format(item))\n                    if \"WSREP: Recovered position:\" in item:\n                        line = item.strip().split()\n                        wsrep_rec_pos = line[-1].split(':')[-1]\n                if wsrep_rec_pos is None:\n                    logger.error(\"WSREP_REC_POS position could not be found.\")\n                    raise Exception(\"WSREP_REC_POS position could not be found.\")\n                return wsrep_rec_pos\n\n            set_grastate_val(key='seqno', value=recover_wsrep_position())\n        else:\n            logger.info(\"Node shutdown was clean, using grastate.dat\")\n\n        update_grastate_configmap()\n\n    else:\n        logger.info(\"No grastate.dat exists I am a new node\")\n\n\ndef get_active_endpoints(endpoints_name=direct_svc_name,\n                         namespace=pod_namespace):\n    \"\"\"Returns a list of active endpoints.\n\n    Keyword arguments:\n    endpoints_name -- endpoints to check for active backends\n                      (default direct_svc_name)\n    namespace -- namespace to check for endpoints (default pod_namespace)\n    \"\"\"\n    try:\n        endpoints = k8s_api_instance.read_namespaced_endpoints(\n            name=endpoints_name, namespace=pod_namespace)\n    except kubernetes.client.rest.ApiException as error:\n        logger.error(\"Failed to get mariadb service with error: {0}\".format(error))\n        raise error\n    endpoints_dict = endpoints.to_dict()\n    active_endpoints = []\n    if endpoints_dict['subsets']:\n        active_endpoints = [s['addresses'] for s in endpoints_dict['subsets'] if 'addresses' in s\n        ][0]\n    return active_endpoints\n\n\ndef 
check_for_active_nodes(endpoints_name=direct_svc_name,\n                           namespace=pod_namespace):\n    \"\"\"Check K8s endpoints to see if there are active Mariadb Instances.\n\n    Keyword arguments:\n    endpoints_name -- endpoints to check for active backends\n                      (default direct_svc_name)\n    namespace -- namespace to check for endpoints (default pod_namespace)\n    \"\"\"\n    logger.info(\"Checking for active nodes\")\n    active_endpoints = get_active_endpoints()\n    if active_endpoints and len(active_endpoints) >= 1:\n        logger.info(\"Amount of active endpoints:  {0}\".format(len(active_endpoints)))\n        return True\n    else:\n        logger.info(\"Amount of active endpoints:  0\")\n        return False\n\n\ndef check_if_cluster_data_is_fresh():\n    \"\"\"Check if the state_configmap is both current and reasonably stable.\"\"\"\n    logger.info(\"Checking to see if cluster data is fresh\")\n    state_configmap = k8s_api_instance.read_namespaced_config_map(\n        name=state_configmap_name, namespace=pod_namespace)\n    state_configmap_dict = state_configmap.to_dict()\n    sample_times = dict()\n    for key, value in list(state_configmap_dict['data'].items()):\n        keyitems = key.split('.')\n        key = keyitems[0]\n        node = keyitems[1]\n        if key == 'sample_time':\n            sample_times[node] = value\n    sample_time_ok = True\n    for key, value in list(sample_times.items()):\n        sample_time = iso8601.parse_date(value).replace(tzinfo=None)\n        # NOTE(vsaienko): give some time on resolving configmap update conflicts\n        sample_cutoff_time = datetime.utcnow().replace(\n            tzinfo=None) - timedelta(seconds=5*state_configmap_update_period)\n        if not sample_time >= sample_cutoff_time:\n            logger.info(\n                \"The data we have from the cluster is too old to make a \"\n                \"decision for node {0}\".format(key))\n            sample_time_ok = 
False\n        else:\n            logger.info(\n                \"The data we have from the cluster is ok for node {0}\".format(\n                    key))\n    return sample_time_ok\n\n\ndef get_nodes_with_highest_seqno():\n    \"\"\"Find out which node(s) has the highest sequence number and return\n    them in an array.\"\"\"\n    logger.info(\"Getting the node(s) with highest seqno from configmap.\")\n    # We can proceed only when we get seqno from all nodes, and if seqno is\n    # -1 it means we didn't get it correctly, the shutdown was not clean and we need\n    # to wait for a value taken by wsrep recover.\n    while True:\n        state_configmap = k8s_api_instance.read_namespaced_config_map(\n            name=state_configmap_name, namespace=pod_namespace)\n        state_configmap_dict = state_configmap.to_dict()\n        seqnos = dict()\n        for key, value in list(state_configmap_dict['data'].items()):\n            keyitems = key.split('.')\n            key = keyitems[0]\n            node = keyitems[1]\n            if key == 'seqno':\n                #Explicit casting to integer to have resulting list of integers for correct comparison\n                seqnos[node] = int(value)\n        max_seqno = max(seqnos.values())\n        max_seqno_nodes = sorted([k for k, v in list(seqnos.items()) if v == max_seqno])\n        if [x for x in seqnos.values() if x < 0 ]:\n            logger.info(\"The seqno for some nodes is < 0, can't make a decision about leader. Node seqnums: %s\", seqnos)\n            time.sleep(state_configmap_update_period)\n            continue\n        return max_seqno_nodes\n\n\ndef resolve_leader_node(nodename_array):\n    \"\"\"From the given nodename array, determine which node is the leader\n    by choosing the node which has a hostname with the lowest number at\n    the end of it. 
If by chance there are two nodes with the same number\n    then the first one encountered will be chosen.\"\"\"\n    logger.info(\"Returning the node with the lowest hostname\")\n    lowest = sys.maxsize\n    leader = nodename_array[0]\n    for nodename in nodename_array:\n        nodenum = int(nodename[nodename.rindex('-') + 1:])\n        logger.info(\"Nodename %s has nodenum %d\", nodename, nodenum)\n        if nodenum < lowest:\n            lowest = nodenum\n            leader = nodename\n    logger.info(\"Resolved leader is %s\", leader)\n    return leader\n\n\ndef check_if_i_lead():\n    \"\"\"Check on full restart of cluster if this node should lead the cluster\n    reformation.\"\"\"\n    logger.info(\"Checking to see if I lead the cluster for reboot\")\n    # as we sample on the update period - we sample for a full cluster\n    # leader election period as a simplistic way of ensuring nodes are\n    # reliably checking in following full restart of cluster.\n    count = cluster_leader_ttl / state_configmap_update_period\n    counter = 0\n    while counter < count:\n        if check_if_cluster_data_is_fresh():\n            counter += 1\n        else:\n            counter = 0\n        time.sleep(state_configmap_update_period)\n        logger.info(\n            \"Cluster info has been up to date {0} times out of the required \"\n            \"{1}\".format(counter, count))\n    max_seqno_nodes = get_nodes_with_highest_seqno()\n    leader_node = resolve_leader_node(max_seqno_nodes)\n    if (local_hostname == leader_node and not check_for_active_nodes()\n            and get_cluster_state() == 'live'):\n        logger.info(\"I lead the cluster. 
Setting cluster state to reboot.\")\n        set_configmap_annotation(\n            key='openstackhelm.openstack.org/cluster.state', value='reboot')\n        set_configmap_annotation(\n            key='openstackhelm.openstack.org/reboot.node', value=local_hostname)\n        return True\n    elif local_hostname == leader_node:\n        logger.info(\"The cluster is already rebooting\")\n        return False\n    else:\n        logger.info(\"{0} leads the cluster\".format(leader_node))\n        return False\n\n\ndef monitor_cluster(stop_event):\n    \"\"\"Function to kick off grastate configmap updating thread\"\"\"\n    while True:\n        if stop_event.is_set():\n            logger.info(\"Stopped monitor_cluster thread\")\n            break\n        try:\n            update_grastate_configmap()\n        except Exception as error:\n            logger.error(\"Error updating grastate configmap: {0}\".format(error))\n        time.sleep(state_configmap_update_period)\n\n# Stop event\nstop_event = threading.Event()\n\n# Setup the thread for the cluster monitor\nmonitor_cluster_thread = threading.Thread(target=monitor_cluster, args=(stop_event,))\nmonitor_cluster_thread.daemon = True\n\n\ndef launch_cluster_monitor():\n    \"\"\"Launch grastate configmap updating thread\"\"\"\n    if not monitor_cluster_thread.is_alive():\n        monitor_cluster_thread.start()\n\n\ndef leader_election(stop_event):\n    \"\"\"Function to kick off leader election thread\"\"\"\n    while True:\n        if stop_event.is_set():\n            logger.info(\"Stopped leader_election thread\")\n            break\n        try:\n            deadmans_leader_election()\n        except Exception as error:\n            logger.error(\"Error electing leader: {0}\".format(error))\n        time.sleep(cluster_leader_ttl / 2)\n\n\n# Setup the thread for the leader election\nleader_election_thread = threading.Thread(target=leader_election, args=(stop_event,))\nleader_election_thread.daemon = True\n\n\ndef 
launch_leader_election():\n    \"\"\"Launch leader election thread\"\"\"\n    if not leader_election_thread.is_alive():\n        leader_election_thread.start()\n\n\ndef run_mysqld(cluster='existing'):\n    \"\"\"Launch the mysqld instance for the pod. This will also run mysql upgrade\n    if we are the 1st replica, and the rest of the cluster is already running.\n    This scenario will be triggered either following a rolling update, as this\n    works in reverse order for statefulset. Or restart of the 1st instance, in\n    which case the command should be a no-op.\n\n    Keyword arguments:\n    cluster -- whether we are going to form a cluster 'new' or joining an existing\n               cluster 'existing' (default 'existing')\n    \"\"\"\n    stop_mysqld()\n    mysqld_write_cluster_conf(mode='run')\n    launch_leader_election()\n    launch_cluster_monitor()\n    mysqld_cmd = [MYSQL_BINARY_NAME, '--user=mysql']\n    if cluster == 'new':\n        mysqld_cmd.append('--wsrep-new-cluster')\n\n    mysql_data_dir = '/var/lib/mysql'\n    db_test_dir = \"{0}/mysql\".format(mysql_data_dir)\n    if os.path.isdir(db_test_dir):\n        logger.info(\"Setting the admin passwords to the current value and upgrade mysql if needed\")\n        if not mysql_dbaudit_username:\n            template = (\n                \"CREATE OR REPLACE USER '{0}'@'%' IDENTIFIED BY \\'{1}\\' ;\\n\"\n                \"GRANT ALL ON *.* TO '{0}'@'%' {4} WITH GRANT OPTION ;\\n\"\n                \"CREATE OR REPLACE USER '{2}'@'127.0.0.1' IDENTIFIED BY '{3}' ;\\n\"\n                \"GRANT PROCESS, RELOAD, LOCK TABLES, REPLICATION CLIENT ON *.* TO '{2}'@'127.0.0.1' ;\\n\"\n                \"FLUSH PRIVILEGES ;\".format(mysql_dbadmin_username, mysql_dbadmin_password,\n                                    mysql_dbsst_username, mysql_dbsst_password,\n                                    mysql_x509))\n        else:\n            template = (\n                \"CREATE OR REPLACE USER '{0}'@'%' IDENTIFIED BY \\'{1}\\' 
;\\n\"\n                \"GRANT ALL ON *.* TO '{0}'@'%' {6} WITH GRANT OPTION ;\\n\"\n                \"CREATE OR REPLACE USER '{2}'@'127.0.0.1' IDENTIFIED BY '{3}' ;\\n\"\n                \"GRANT PROCESS, RELOAD, LOCK TABLES, REPLICATION CLIENT ON *.* TO '{2}'@'127.0.0.1' ;\\n\"\n                \"CREATE OR REPLACE USER '{4}'@'%' IDENTIFIED BY '{5}' ;\\n\"\n                \"GRANT SELECT ON *.* TO '{4}'@'%' {6};\\n\"\n                \"FLUSH PRIVILEGES ;\".format(mysql_dbadmin_username, mysql_dbadmin_password,\n                                    mysql_dbsst_username, mysql_dbsst_password,\n                                    mysql_dbaudit_username, mysql_dbaudit_password,\n                                    mysql_x509))\n        bootstrap_sql_file = tempfile.NamedTemporaryFile(suffix='.sql').name\n        with open(bootstrap_sql_file, 'w') as f:\n            f.write(template)\n            f.close()\n        run_cmd_with_logging_thread = threading.Thread(target=run_cmd_with_logging, args=([\n            MYSQL_BINARY_NAME, '--bind-address=127.0.0.1', '--wsrep-on=false',\n            \"--init-file={0}\".format(bootstrap_sql_file)\n        ], logger))\n        run_cmd_with_logging_thread.start()\n        wait_mysql_status()\n        logger.info(\"Upgrading local mysql instance\")\n        upgrade_cmd=['mariadb-upgrade', '--skip-write-binlog',\n                     \"--user={0}\".format(mysql_dbadmin_username),\n                     \"--password={0}\".format(mysql_dbadmin_password)]\n        if mysql_x509:\n            upgrade_cmd.extend(MYSQL_SSL_CMD_OPTS)\n        upgrade_res = run_cmd_with_logging(upgrade_cmd, logger)\n        if upgrade_res != 0:\n            raise Exception('Mysql upgrade failed, cannot proceed')\n        stop_mysqld()\n        os.remove(bootstrap_sql_file)\n    else:\n        logger.info(\n            \"This is a fresh node joining the cluster for the 1st time, not attempting to set admin passwords or upgrading\"\n        )\n    
logger.info(\"Launching MariaDB\")\n    run_cmd_with_logging(mysqld_cmd, logger)\n\n\ndef mysqld_reboot():\n    \"\"\"Reboot a mysqld cluster.\"\"\"\n    declare_myself_cluster_leader()\n    set_grastate_val(key='safe_to_bootstrap', value='1')\n    run_mysqld(cluster='new')\n\n\ndef sigterm_shutdown(x, y):\n    \"\"\"Shutdown the instance of mysqld on shutdown signal.\"\"\"\n    logger.info(\"Got a sigterm from the container runtime, time to go.\")\n    stop_event.set()\n    stop_mysqld()\n    monitor_cluster_thread.join()\n    leader_election_thread.join()\n    sys.exit(0)\n\n\n# Register the signal to the handler\nsignal.signal(signal.SIGTERM, sigterm_shutdown)\n\n# Main logic loop\nif get_cluster_state() == 'new':\n    leader_node = get_configmap_value(\n        type='annotation', key='openstackhelm.openstack.org/leader.node')\n    if leader_node == local_hostname:\n        set_configmap_annotation(\n            key='openstackhelm.openstack.org/cluster.state', value='init')\n        declare_myself_cluster_leader()\n        launch_leader_election()\n        mysqld_bootstrap()\n        update_grastate_configmap()\n        set_configmap_annotation(\n            key='openstackhelm.openstack.org/cluster.state', value='live')\n        run_mysqld(cluster='new')\n    else:\n        logger.info(\"Waiting for cluster to start running\")\n        while not get_cluster_state() == 'live':\n            time.sleep(default_sleep)\n        while not check_for_active_nodes():\n            time.sleep(default_sleep)\n        launch_leader_election()\n        run_mysqld()\nelif get_cluster_state() == 'init':\n    logger.info(\"Waiting for cluster to start running\")\n    while not get_cluster_state() == 'live':\n        time.sleep(default_sleep)\n    while not check_for_active_nodes():\n        time.sleep(default_sleep)\n    launch_leader_election()\n    run_mysqld()\nelif get_cluster_state() == 'live':\n    logger.info(\"Cluster has been running starting restore/rejoin\")\n    if 
not int(mariadb_replicas) > 1:\n        logger.info(\n            \"There is only a single node in this cluster, we are good to go\")\n        update_grastate_on_restart()\n        mysqld_reboot()\n    else:\n        if check_for_active_nodes():\n            logger.info(\n                \"There are currently running nodes in the cluster, we can \"\n                \"join them\")\n            run_mysqld()\n        else:\n            logger.info(\"This cluster has lost all running nodes, we need to \"\n                        \"determine the new lead node\")\n            update_grastate_on_restart()\n            launch_leader_election()\n            launch_cluster_monitor()\n            if check_if_i_lead():\n                logger.info(\"I won the ability to reboot the cluster\")\n                mysqld_reboot()\n            else:\n                logger.info(\n                    \"Waiting for the lead node to come online before joining \"\n                    \"it\")\n                while not check_for_active_nodes():\n                    time.sleep(default_sleep)\n                set_configmap_annotation(\n                    key='openstackhelm.openstack.org/cluster.state', value='live')\n                set_configmap_annotation(\n                    key='openstackhelm.openstack.org/reboot.node', value='')\n                run_mysqld()\nelif get_cluster_state() == 'reboot':\n    reboot_node = get_configmap_value(\n        type='annotation', key='openstackhelm.openstack.org/reboot.node')\n    if reboot_node == local_hostname:\n        logger.info(\n        \"Cluster reboot procedure wasn't finished. 
Trying again.\")\n        update_grastate_on_restart()\n        launch_leader_election()\n        launch_cluster_monitor()\n        mysqld_reboot()\n    else:\n        logger.info(\n            \"Waiting for the lead node to come online before joining \"\n            \"it\")\n        update_grastate_on_restart()\n        launch_leader_election()\n        launch_cluster_monitor()\n        while not check_for_active_nodes():\n            time.sleep(default_sleep)\n        set_configmap_annotation(\n            key='openstackhelm.openstack.org/cluster.state', value='live')\n        run_mysqld()\nelse:\n    logger.critical(\"Don't understand cluster state, exiting with error status\")\n    sys.exit(1)\n"
  },
  {
    "path": "mariadb/templates/bin/_start_mariadb_verify_server.sh.tpl",
    "content": "#!/bin/bash -ex\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nlog () {\n  msg_default=\"Need some text to log\"\n  level_default=\"INFO\"\n  component_default=\"Mariadb Backup Verifier\"\n\n  msg=${1:-$msg_default}\n  level=${2:-$level_default}\n  component=${3:-\"$component_default\"}\n\n  echo \"$(date +'%Y-%m-%d %H:%M:%S,%3N') - ${component} - ${level} - ${msg}\"\n}\n\nlog \"Starting Mariadb server for backup verification...\"\nmariadb-install-db --user=nobody --ldata=/var/lib/mysql >/dev/null 2>&1\nMYSQL_ALLOW_EMPTY_PASSWORD=1 mariadbd --user=nobody --verbose >/dev/null 2>&1\n"
  },
  {
    "path": "mariadb/templates/bin/_test.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nrm -f /tmp/test-success\n\nmariadb-slap \\\n  --defaults-file=/etc/mysql/test-params.cnf \\\n  {{ include \"helm-toolkit.utils.joinListWithSpace\" $.Values.conf.tests.params }} -vv \\\n  --post-system=\"touch /tmp/test-success\"\n\nif ! [ -f /tmp/test-success ]; then\n  exit 1\nfi\n"
  },
  {
    "path": "mariadb/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.certificates -}}\n{{ dict \"envAll\" . \"service\" \"oslo_db\" \"type\" \"default\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "mariadb/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{ if eq .Values.endpoints.oslo_db.auth.admin.username .Values.endpoints.oslo_db.auth.sst.username }}\n{{ fail \"the DB admin username should not match the sst user username\" }}\n{{ end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: mariadb-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  health.sh: |\n{{ tuple \"bin/_health.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  start.py: |\n{{ tuple \"bin/_start.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  test.sh: |\n{{ tuple \"bin/_test.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- if .Values.conf.backup.enabled }}\n  backup_mariadb.sh: |\n{{ tuple \"bin/_backup_mariadb.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  start_verification_server.sh: |\n{{ tuple \"bin/_start_mariadb_verify_server.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  restore_mariadb.sh: |\n{{ tuple \"bin/_restore_mariadb.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  backup_main.sh: |\n{{ include \"helm-toolkit.scripts.db-backup-restore.backup_main\" . | indent 4 }}\n  restore_main.sh: |\n{{ include \"helm-toolkit.scripts.db-backup-restore.restore_main\" . 
| indent 4 }}\n{{- end }}\n{{- if .Values.manifests.job_ks_user }}\n  ks-user.sh: |\n{{ include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n{{- end }}\n{{- if .Values.manifests.deployment_controller }}\n  mariadb_controller.py: |\n{{ tuple \"bin/_mariadb_controller.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  mariadb-wait-for-cluster.py: |\n{{ tuple \"bin/_mariadb-wait-for-cluster.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\" );\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: mariadb-etc\ndata:\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" ( index $envAll.Values.conf.database \"my\" ) \"key\" \"my.cnf\" ) | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" ( index $envAll.Values.conf.database \"00_base\" ) \"key\" \"00-base.cnf\" ) | indent 2 }}\n{{- if $envAll.Values.conf.database.config_override }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" ( index $envAll.Values.conf.database \"config_override\" ) \"key\" \"20-override.cnf\" ) | indent 2 }}\n{{- end }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" ( index $envAll.Values.conf.database \"99_force\" ) \"key\" \"99-force.cnf\" ) | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/configmap-services-tcp.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_services_tcp }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: mariadb-services-tcp\ndata:\n  {{ tuple \"oslo_db\" \"internal\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}: \"{{ .Release.Namespace }}/{{ tuple \"oslo_db\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}:{{ tuple \"oslo_db\" \"direct\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\"\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/cron-job-backup-mariadb.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_job_mariadb_backup }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"mariadb-backup\" }}\n{{- $failoverUserClass := .Values.conf.backup.remote_backup.failover_user_class }}\n{{ tuple $envAll \"mariadb_backup\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: mariadb-backup\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"mariadb-backup\" \"backup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  schedule: {{ .Values.jobs.mariadb_backup.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.mariadb_backup.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.mariadb_backup.history.failed }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"mariadb-backup\" \"backup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"mariadb-backup\" \"containerNames\" (list \"init\" \"backup-perms\" \"mariadb-backup\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{- if .Values.jobs.mariadb_backup.backoffLimit }}\n      backoffLimit: {{ 
.Values.jobs.mariadb_backup.backoffLimit }}\n{{- end }}\n{{- if .Values.jobs.mariadb_backup.activeDeadlineSeconds }}\n      activeDeadlineSeconds: {{ .Values.jobs.mariadb_backup.activeDeadlineSeconds }}\n{{- end }}\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"mariadb-backup\" \"backup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n{{ dict \"envAll\" $envAll \"application\" \"mariadb_backup\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n          serviceAccountName: {{ $serviceAccountName }}\n          restartPolicy: OnFailure\n          shareProcessNamespace: true\n{{- if $envAll.Values.pod.tolerations.mariadb.enabled }}\n{{ tuple $envAll \"mariadb\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{- end }}\n{{- if $envAll.Values.pod.affinity }}\n{{- if $envAll.Values.pod.affinity.mariadb_backup }}\n          affinity:\n{{  index $envAll.Values.pod.affinity \"mariadb_backup\"  | toYaml | indent 12}}\n{{- end }}\n{{- end }}\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n          initContainers:\n{{ tuple $envAll \"mariadb_backup\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n            - name: backup-perms\n{{ tuple $envAll \"mariadb_backup\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.mariadb_backup | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"mariadb_backup\" \"container\" \"backup_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              command:\n                - chown\n                - -R\n                - \"65534:65534\"\n                - $(MARIADB_BACKUP_BASE_DIR)\n              env:\n          
      - name: MARIADB_BACKUP_BASE_DIR\n                  value: {{ .Values.conf.backup.base_path | quote }}\n              volumeMounts:\n                - mountPath: /tmp\n                  name: pod-tmp\n                - mountPath: {{ .Values.conf.backup.base_path }}\n                  name: mariadb-backup-dir\n            - name: verify-perms\n{{ tuple $envAll \"mariadb_backup\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.mariadb_backup | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"mariadb_backup\" \"container\" \"verify_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              command:\n                - chown\n                - -R\n                - \"65534:65534\"\n                - /var/lib/mysql\n              volumeMounts:\n                - mountPath: /tmp\n                  name: pod-tmp\n                - mountPath: /var/lib/mysql\n                  name: mysql-data\n          containers:\n            - name: mariadb-backup\n              command:\n                - /bin/sh\n              args:\n                - -c\n                - >-\n                    ( /tmp/start_verification_server.sh ) &\n                    /tmp/backup_mariadb.sh\n              env:\n                - name: MARIADB_BACKUP_BASE_DIR\n                  value: {{ .Values.conf.backup.base_path | quote }}\n                - name: MYSQL_BACKUP_MYSQLDUMP_OPTIONS\n                  value: {{ .Values.conf.backup.mysqldump_options | quote }}\n                - name: MARIADB_LOCAL_BACKUP_DAYS_TO_KEEP\n                  value: {{ .Values.conf.backup.days_to_keep | quote }}\n                - name: MARIADB_POD_NAMESPACE\n                  valueFrom:\n                    fieldRef:\n                      fieldPath: metadata.namespace\n                - name: REMOTE_BACKUP_ENABLED\n                  
value: \"{{ .Values.conf.backup.remote_backup.enabled }}\"\n{{- if .Values.conf.backup.remote_backup.enabled }}\n                - name: MARIADB_REMOTE_BACKUP_DAYS_TO_KEEP\n                  value: {{ .Values.conf.backup.remote_backup.days_to_keep | quote }}\n                - name: CONTAINER_NAME\n                  value: {{ .Values.conf.backup.remote_backup.container_name | quote }}\n                - name: STORAGE_POLICY\n                  value: \"{{ .Values.conf.backup.remote_backup.storage_policy }}\"\n                - name: NUMBER_OF_RETRIES_SEND_BACKUP_TO_REMOTE\n                  value: {{ .Values.conf.backup.remote_backup.number_of_retries | quote }}\n                - name: MIN_DELAY_SEND_BACKUP_TO_REMOTE\n                  value: {{ .Values.conf.backup.remote_backup.delay_range.min | quote }}\n                - name: MAX_DELAY_SEND_BACKUP_TO_REMOTE\n                  value: {{ .Values.conf.backup.remote_backup.delay_range.max | quote }}\n                - name: THROTTLE_BACKUPS_ENABLED\n                  value: \"{{ .Values.conf.backup.remote_backup.throttle_backups.enabled }}\"\n                - name: THROTTLE_LIMIT\n                  value: {{ .Values.conf.backup.remote_backup.throttle_backups.sessions_limit | quote }}\n                - name: THROTTLE_LOCK_EXPIRE_AFTER\n                  value: {{ .Values.conf.backup.remote_backup.throttle_backups.lock_expire_after | quote }}\n                - name: THROTTLE_RETRY_AFTER\n                  value: {{ .Values.conf.backup.remote_backup.throttle_backups.retry_after | quote }}\n                - name: THROTTLE_CONTAINER_NAME\n                  value: {{ .Values.conf.backup.remote_backup.throttle_backups.container_name | quote }}\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.mariadb }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 16 }}\n{{- $failoverIdentityClass := index $envAll.Values.endpoints.identity.auth $failoverUserClass }}\n{{- if 
$failoverIdentityClass }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_failover_env_vars\" $env | indent 16 }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{ tuple $envAll \"mariadb_backup\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.mariadb_backup | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"mariadb_backup\" \"container\" \"mariadb_backup\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - mountPath: /tmp/backup_mariadb.sh\n                  name: mariadb-bin\n                  readOnly: true\n                  subPath: backup_mariadb.sh\n                - mountPath: /tmp/backup_main.sh\n                  name: mariadb-bin\n                  readOnly: true\n                  subPath: backup_main.sh\n                - mountPath: {{ .Values.conf.backup.base_path }}\n                  name: mariadb-backup-dir\n                - name: mariadb-secrets\n                  mountPath: /etc/mysql/admin_user.cnf\n                  subPath: admin_user.cnf\n                  readOnly: true\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n                - name: mariadb-bin\n                  mountPath: /tmp/start_verification_server.sh\n                  readOnly: true\n                  subPath: start_verification_server.sh\n                - name: mysql-data\n                  mountPath: /var/lib/mysql\n                - name: var-run\n                  mountPath: /run/mysqld\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: mycnfd\n              emptyDir: {}\n            
- name: var-run\n              emptyDir: {}\n            - name: mariadb-etc\n              configMap:\n                name: mariadb-etc\n                defaultMode: 0444\n            - name: mysql-data\n              emptyDir: {}\n            - name: mariadb-secrets\n              secret:\n                secretName: mariadb-secrets\n                defaultMode: 420\n            - configMap:\n                defaultMode: 365\n                name: mariadb-bin\n              name: mariadb-bin\n            {{- if and .Values.volume.backup.enabled  .Values.manifests.pvc_backup }}\n            - name: mariadb-backup-dir\n              persistentVolumeClaim:\n                claimName: mariadb-backup-data\n            {{- else }}\n            - hostPath:\n                path: {{ .Values.conf.backup.base_path }}\n                type: DirectoryOrCreate\n              name: mariadb-backup-dir\n            {{- end }}\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/deployment-controller.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_controller }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"mariadb-controller\" }}\n{{ tuple $envAll \"controller\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $envAll.Release.Name }}-{{ $serviceAccountName }}-pod\n  namespace: {{ $envAll.Release.Namespace }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods\n    verbs:\n      - get\n      - list\n  - apiGroups:\n      - \"\"\n    resources:\n      - services\n    verbs:\n      - update\n      - patch\n      - get\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $envAll.Release.Name }}-{{ $serviceAccountName }}-pod\n  namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $envAll.Release.Name }}-{{ $serviceAccountName }}-pod\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\n\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mariadb-controller\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"mariadb\" \"controller\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 
}}\nspec:\n  replicas: {{ .Values.pod.replicas.controller }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"mariadb\" \"controller\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"mariadb\" \"controller\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"controller\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"mariadb\" \"controller\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ if $envAll.Values.pod.tolerations.mariadb.enabled }}\n{{ tuple $envAll \"mariadb\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.controller.node_selector_key }}: {{ .Values.labels.controller.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"controller\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: controller\n{{ tuple $envAll \"mariadb_controller\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"controller\" \"container\" \"controller\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.controller | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/mariadb_controller.py\n    
      env:\n{{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.pod.env.mariadb_controller | indent 12 }}\n            - name: MARIADB_CONTROLLER_PODS_NAMESPACE\n              value: {{ $envAll.Release.Namespace }}\n            - name: MARIADB_MASTER_SERVICE_NAME\n              value: {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - mountPath: /tmp/mariadb_controller.py\n              name: mariadb-bin\n              readOnly: true\n              subPath: mariadb_controller.py\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: mariadb-bin\n          configMap:\n            name: mariadb-bin\n            defaultMode: 365\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/exporter-configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.configmap_bin .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: mysql-exporter-bin\ndata:\n  create-mysql-user.sh: |\n{{ tuple \"bin/_prometheus-create-mysql-user.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  mysqld-exporter.sh: |\n{{ tuple \"bin/_prometheus-mysqld-exporter.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/exporter-job-create-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.job_user_create .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"mariadb-exporter-create-sql-user\" }}\n{{ tuple $envAll \"prometheus_create_mysql_user\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: mariadb-exporter-create-sql-user\n  labels:\n{{ tuple $envAll \"prometheus-mysql-exporter\" \"create-sql-user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  backoffLimit: {{ .Values.jobs.exporter_create_sql_user.backoffLimit }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"prometheus-mysql-exporter\" \"create-sql-user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"create-sql-user\" \"containerNames\" (list \"init\" \"exporter-create-sql-user\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      shareProcessNamespace: true\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"prometheus_create_mysql_user\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | 
indent 6 }}\n      activeDeadlineSeconds: {{ .Values.jobs.exporter_create_sql_user.activeDeadlineSeconds }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.prometheus_mysql_exporter.node_selector_key }}: {{ .Values.labels.prometheus_mysql_exporter.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"prometheus_create_mysql_user\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: exporter-create-sql-user\n{{ tuple $envAll \"prometheus_create_mysql_user\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"prometheus_create_mysql_user\" \"container\" \"main\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.prometheus_create_mysql_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/create-mysql-user.sh\n          env:\n            - name: EXPORTER_USER\n              valueFrom:\n                secretKeyRef:\n                  name: mysql-exporter-secrets\n                  key: EXPORTER_USER\n            - name: EXPORTER_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: mysql-exporter-secrets\n                  key: EXPORTER_PASSWORD\n{{- if $envAll.Values.manifests.certificates }}\n            - name: MARIADB_X509\n              value: \"REQUIRE X509\"\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: mysql-exporter-bin\n              mountPath: /tmp/create-mysql-user.sh\n              subPath: create-mysql-user.sh\n              readOnly: true\n            - name: mariadb-secrets\n              mountPath: /etc/mysql/admin_user.cnf\n              subPath: admin_user.cnf\n              readOnly: true\n{{ dict \"enabled\" 
$envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: mysql-exporter-bin\n          configMap:\n            name: mysql-exporter-bin\n            defaultMode: 0555\n        - name: mariadb-secrets\n          secret:\n            secretName: mariadb-secrets\n            defaultMode: 0444\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/exporter-secrets-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.secret_etc .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n\n{{- $exporter_user := .Values.endpoints.oslo_db.auth.exporter.username }}\n{{- $exporter_password := .Values.endpoints.oslo_db.auth.exporter.password }}\n{{- $db_host := \"localhost\" }}\n{{- $data_source_name := printf \"%s:%s@(%s)/\" $exporter_user $exporter_password $db_host }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: mysql-exporter-secrets\ntype: Opaque\ndata:\n  DATA_SOURCE_NAME: {{ $data_source_name | b64enc }}\n  EXPORTER_USER: {{ .Values.endpoints.oslo_db.auth.exporter.username | b64enc }}\n  EXPORTER_PASSWORD: {{ .Values.endpoints.oslo_db.auth.exporter.password | b64enc }}\n  mysql_user.cnf: {{ tuple \"secrets/_prometheus-exporter_user.cnf.tpl\" . | include \"helm-toolkit.utils.template\" | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "mariadb/templates/job-cluster-wait.yaml",
    "content": "{{/*\nCopyright 2019 Mirantis inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_cluster_wait }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := print .Release.Name \"-cluster-wait\" }}\n{{ tuple $envAll \"cluster_wait\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $envAll.Release.Name }}-{{ $serviceAccountName }}-pod\n  namespace: {{ $envAll.Release.Namespace }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n    verbs:\n      - update\n      - patch\n      - get\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $envAll.Release.Name }}-{{ $serviceAccountName }}-pod\n  namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $envAll.Release.Name }}-{{ $serviceAccountName }}-pod\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: \"{{.Release.Name}}-cluster-wait\"\n  labels:\n{{ tuple $envAll \"mariadb\" \"cluster-wait\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  backoffLimit: {{ 
.Values.jobs.cluster_wait.clusterCheckRetries }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"mariadb\" \"cluster-wait\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"cluster_wait\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"cluster_wait\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: {{.Release.Name}}-mariadb-cluster-wait\n{{ tuple $envAll \"mariadb_scripted_test\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cluster_wait\" \"container\" \"mariadb_cluster_wait\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: MARIADB_HOST\n              value: {{ tuple \"oslo_db\" \"internal\" $envAll | include \"helm-toolkit.endpoints.endpoint_host_lookup\" }}\n            - name: MARIADB_REPLICAS\n              value: {{ .Values.pod.replicas.server | quote }}\n            - name: MARIADB_CLUSTER_CHECK_WAIT\n              value: {{ .Values.jobs.cluster_wait.clusterCheckWait | quote }}\n            - name: MARIADB_CLUSTER_STABILITY_COUNT\n              value: {{ .Values.jobs.cluster_wait.clusterStabilityCount | quote }}\n            - name: MARIADB_CLUSTER_STABILITY_WAIT\n              value: {{ .Values.jobs.cluster_wait.clusterStabilityWait | quote }}\n            - name: MARIADB_CLUSTER_STATE_CONFIGMAP\n              value: {{ printf \"%s-%s\" .Release.Name \"mariadb-state\" | quote }}\n            - name: MARIADB_CLUSTER_STATE_CONFIGMAP_NAMESPACE\n              value: {{ 
$envAll.Release.Namespace }}\n            - name: MARIADB_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: mariadb-dbadmin-password\n                  key: MYSQL_DBADMIN_PASSWORD\n          command:\n            - /tmp/mariadb-wait-for-cluster.py\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: mariadb-bin\n              mountPath: /tmp/mariadb-wait-for-cluster.py\n              subPath: mariadb-wait-for-cluster.py\n              readOnly: true\n            - name: mariadb-secrets\n              mountPath: /etc/mysql/admin_user.cnf\n              subPath: admin_user.cnf\n              readOnly: true\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: mariadb-bin\n          configMap:\n            name: mariadb-bin\n            defaultMode: 0555\n        - name: mariadb-secrets\n          secret:\n            secretName: mariadb-secrets\n            defaultMode: 0444\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"mariadb\" -}}\n{{- if .Values.pod.tolerations.mariadb.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $backoffLimit := .Values.jobs.ks_user.backoffLimit }}\n{{- $activeDeadlineSeconds := .Values.jobs.ks_user.activeDeadlineSeconds }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"mariadb\" \"configMapBin\" \"mariadb-bin\" \"backoffLimit\" $backoffLimit \"activeDeadlineSeconds\" $activeDeadlineSeconds -}}\n{{- if .Values.pod.tolerations.mariadb.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/mariadb-backup-pvc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.volume.backup.enabled .Values.manifests.pvc_backup }}\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: mariadb-backup-data\nspec:\n  accessModes: [ \"ReadWriteOnce\" ]\n  resources:\n    requests:\n      storage: {{ .Values.volume.backup.size }}\n  storageClassName: {{ .Values.volume.backup.class_name }}\n{{- end }}\n\n"
  },
  {
    "path": "mariadb/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"mariadb\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "mariadb/templates/pdb-mariadb.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_server }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: mariadb-server\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.mariadb.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"mariadb\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/pod-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.pod_test }}\n{{- $envAll := . }}\n{{- $dependencies := .Values.dependencies.static.tests }}\n\n{{- $serviceAccountName := print .deployment_name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{.deployment_name}}-test\"\n  labels:\n{{ tuple $envAll \"mariadb\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"mariadb-test\" \"containerNames\" (list \"init\" \"mariadb-test\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n  shareProcessNamespace: true\n  serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"tests\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n{{ if $envAll.Values.pod.tolerations.mariadb.enabled }}\n{{ tuple $envAll \"mariadb\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  
nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  restartPolicy: Never\n  initContainers:\n{{ tuple $envAll \"tests\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: mariadb-test\n{{ dict \"envAll\" $envAll \"application\" \"tests\" \"container\" \"test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n{{ tuple $envAll \"scripted_test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n      command:\n        - /tmp/test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: mariadb-bin\n          mountPath: /tmp/test.sh\n          subPath: test.sh\n          readOnly: true\n        - name: mariadb-secrets\n          mountPath: /etc/mysql/test-params.cnf\n          {{ if eq $envAll.Values.conf.tests.endpoint \"internal\" }}\n          subPath: admin_user_internal.cnf\n          {{ else if eq $envAll.Values.conf.tests.endpoint \"direct\" }}\n          subPath: admin_user.cnf\n          {{ else }}\n          {{ fail \"Either 'direct' or 'internal' should be specified for .Values.conf.tests.endpoint\" }}\n          {{ end }}\n          readOnly: true\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 8 }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: mariadb-bin\n      configMap:\n        name: mariadb-bin\n        defaultMode: 0555\n    - name: mariadb-secrets\n      secret:\n        secretName: mariadb-secrets\n        defaultMode: 0444\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/secret-backup-restore.yaml",
    "content": "{{/*\nThis manifest results in a secret being created which has the key information\nneeded for backing up and restoring the Mariadb databases.\n*/}}\n\n{{- if and .Values.conf.backup.enabled .Values.manifests.secret_backup_restore }}\n\n{{- $envAll := . }}\n{{- $userClass := \"backup_restore\" }}\n{{- $secretName := index $envAll.Values.secrets.mariadb $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  BACKUP_ENABLED: {{ $envAll.Values.conf.backup.enabled | quote | b64enc }}\n  BACKUP_BASE_PATH: {{ $envAll.Values.conf.backup.base_path | b64enc }}\n  LOCAL_DAYS_TO_KEEP: {{ $envAll.Values.conf.backup.days_to_keep | quote | b64enc }}\n  MYSQLDUMP_OPTIONS: {{ $envAll.Values.conf.backup.mysqldump_options | b64enc }}\n  REMOTE_BACKUP_ENABLED: {{ $envAll.Values.conf.backup.remote_backup.enabled | quote | b64enc }}\n  REMOTE_BACKUP_CONTAINER: {{ $envAll.Values.conf.backup.remote_backup.container_name | b64enc }}\n  REMOTE_BACKUP_DAYS_TO_KEEP: {{ $envAll.Values.conf.backup.remote_backup.days_to_keep | quote | b64enc }}\n  REMOTE_BACKUP_STORAGE_POLICY: {{ $envAll.Values.conf.backup.remote_backup.storage_policy | b64enc }}\n  REMOTE_BACKUP_RETRIES: {{ $envAll.Values.conf.backup.remote_backup.number_of_retries | quote | b64enc }}\n  REMOTE_BACKUP_SEND_DELAY_MIN: {{ $envAll.Values.conf.backup.remote_backup.delay_range.min | quote | b64enc }}\n  REMOTE_BACKUP_SEND_DELAY_MAX: {{ $envAll.Values.conf.backup.remote_backup.delay_range.max | quote | b64enc }}\n  THROTTLE_BACKUPS_ENABLED: {{ $envAll.Values.conf.backup.remote_backup.throttle_backups.enabled | quote | b64enc }}\n  THROTTLE_LIMIT: {{ $envAll.Values.conf.backup.remote_backup.throttle_backups.sessions_limit | quote | b64enc }}\n  THROTTLE_LOCK_EXPIRE_AFTER: {{ $envAll.Values.conf.backup.remote_backup.throttle_backups.lock_expire_after | quote | b64enc }}\n  THROTTLE_RETRY_AFTER: {{ $envAll.Values.conf.backup.remote_backup.throttle_backups.retry_after 
| quote | b64enc }}\n  THROTTLE_CONTAINER_NAME: {{ $envAll.Values.conf.backup.remote_backup.throttle_backups.container_name | quote | b64enc }}\n...\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/secret-dbadmin-password.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_dbadmin_password }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: mariadb-dbadmin-password\ntype: Opaque\ndata:\n  MYSQL_DBADMIN_PASSWORD: {{ .Values.endpoints.oslo_db.auth.admin.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/secret-dbaudit-password.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_dbaudit_password }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: mariadb-dbaudit-password\ntype: Opaque\ndata:\n  MYSQL_DBAUDIT_PASSWORD: {{ .Values.endpoints.oslo_db.auth.audit.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/secret-rgw.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\nThis manifest results in two secrets being created:\n  1) Keystone \"mariadb\" secret, which is needed to access the cluster\n     (remote or same cluster) for storing mariadb backups. If the\n     cluster is remote, the auth_url would be non-null.\n  2) Keystone \"admin\" secret, which is needed to create the\n     \"mariadb\" keystone account mentioned above. This may not\n     be needed if the account is in a remote cluster (auth_url is non-null\n     in that case).\n*/}}\n\n{{- if .Values.conf.backup.remote_backup.enabled }}\n\n{{- $envAll := . 
}}\n{{- $userClass := .Values.conf.backup.remote_backup.primary_user_class }}\n{{- $failoverUserClass := .Values.conf.backup.remote_backup.failover_user_class }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- $identityClass := index .Values.endpoints.identity.auth $userClass }}\n{{- if $identityClass.auth_url }}\n  OS_AUTH_URL: {{ $identityClass.auth_url | b64enc }}\n{{- else }}\n  OS_AUTH_URL: {{ tuple \"identity\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n  OS_REGION_NAME: {{ $identityClass.region_name | b64enc }}\n  OS_INTERFACE: {{ $identityClass.interface | default \"internal\" | b64enc }}\n  OS_PROJECT_DOMAIN_NAME: {{ $identityClass.project_domain_name | b64enc }}\n  OS_PROJECT_NAME: {{ $identityClass.project_name | b64enc }}\n  OS_USER_DOMAIN_NAME: {{ $identityClass.user_domain_name | b64enc }}\n  OS_USERNAME: {{ $identityClass.username | b64enc }}\n  OS_PASSWORD: {{ $identityClass.password | b64enc }}\n  OS_DEFAULT_DOMAIN: {{ $identityClass.default_domain_id | default \"default\" | b64enc }}\n\n{{- $failoverIdentityClass := index .Values.endpoints.identity.auth $failoverUserClass }}\n{{- if $failoverIdentityClass }}\n{{- if $failoverIdentityClass.auth_url }}\n  OS_AUTH_URL_FAILOVER: {{ $failoverIdentityClass.auth_url | b64enc }}\n{{- else }}\n  OS_AUTH_URL_FAILOVER: {{ tuple \"identity\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n  OS_REGION_NAME_FAILOVER: {{ $failoverIdentityClass.region_name | b64enc }}\n  OS_INTERFACE_FAILOVER: {{ $failoverIdentityClass.interface | default \"internal\" | b64enc }}\n  OS_PROJECT_DOMAIN_NAME_FAILOVER: {{ $failoverIdentityClass.project_domain_name | b64enc }}\n  OS_PROJECT_NAME_FAILOVER: {{ $failoverIdentityClass.project_name | b64enc }}\n  
OS_USER_DOMAIN_NAME_FAILOVER: {{ $failoverIdentityClass.user_domain_name | b64enc }}\n  OS_USERNAME_FAILOVER: {{ $failoverIdentityClass.username | b64enc }}\n  OS_PASSWORD_FAILOVER: {{ $failoverIdentityClass.password | b64enc }}\n  OS_DEFAULT_DOMAIN_FAILOVER: {{ $failoverIdentityClass.default_domain_id | default \"default\" | b64enc }}\n{{- end }}\n...\n{{- if .Values.manifests.job_ks_user }}\n{{- $userClass := \"admin\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- $identityClass := index .Values.endpoints.identity.auth $userClass }}\n{{- if $identityClass.auth_url }}\n  OS_AUTH_URL: {{ $identityClass.auth_url | b64enc }}\n{{- else }}\n  OS_AUTH_URL: {{ tuple \"identity\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n  OS_REGION_NAME: {{ $identityClass.region_name | b64enc }}\n  OS_INTERFACE: {{ $identityClass.interface | default \"internal\" | b64enc }}\n  OS_PROJECT_DOMAIN_NAME: {{ $identityClass.project_domain_name | b64enc }}\n  OS_PROJECT_NAME: {{ $identityClass.project_name | b64enc }}\n  OS_USER_DOMAIN_NAME: {{ $identityClass.user_domain_name | b64enc }}\n  OS_USERNAME: {{ $identityClass.username | b64enc }}\n  OS_PASSWORD: {{ $identityClass.password | b64enc }}\n  OS_DEFAULT_DOMAIN: {{ $identityClass.default_domain_id | default \"default\" | b64enc }}\n...\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/secret-sst-password.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_sst_password }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: mariadb-dbsst-password\ntype: Opaque\ndata:\n  MYSQL_DBSST_PASSWORD: {{ .Values.endpoints.oslo_db.auth.sst.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/secrets/_admin_user.cnf.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n[client]\nuser = {{ .Values.endpoints.oslo_db.auth.admin.username }}\npassword = {{ .Values.endpoints.oslo_db.auth.admin.password }}\nhost = {{ tuple \"oslo_db\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nport = {{ tuple \"oslo_db\" \"direct\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- if .Values.manifests.certificates }}\nssl-ca = /etc/mysql/certs/ca.crt\nssl-key = /etc/mysql/certs/tls.key\nssl-cert = /etc/mysql/certs/tls.crt\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/secrets/_admin_user_internal.cnf.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n[client]\nuser = {{ .Values.endpoints.oslo_db.auth.admin.username }}\npassword = {{ .Values.endpoints.oslo_db.auth.admin.password }}\nhost = {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\nport = {{ tuple \"oslo_db\" \"internal\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- if .Values.manifests.certificates }}\nssl-ca = /etc/mysql/certs/ca.crt\nssl-key = /etc/mysql/certs/tls.key\nssl-cert = /etc/mysql/certs/tls.crt\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/secrets/_prometheus-exporter_user.cnf.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n[client]\nuser = {{ .Values.endpoints.oslo_db.auth.exporter.username }}\npassword = {{ .Values.endpoints.oslo_db.auth.exporter.password }}\nhost = localhost\nport = {{ tuple \"oslo_db\" \"direct\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- if .Values.manifests.certificates }}\nssl-ca = /etc/mysql/certs/ca.crt\nssl-key = /etc/mysql/certs/tls.key\nssl-cert = /etc/mysql/certs/tls.crt\n{{- end }}"
  },
  {
    "path": "mariadb/templates/secrets-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: mariadb-secrets\ntype: Opaque\ndata:\n  admin_user.cnf: {{ tuple \"secrets/_admin_user.cnf.tpl\" . | include \"helm-toolkit.utils.template\"  | b64enc }}\n  admin_user_internal.cnf: {{ tuple \"secrets/_admin_user_internal.cnf.tpl\" . | include \"helm-toolkit.utils.template\"  | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/service-discovery.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_discovery }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"oslo_db\" \"discovery\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: mysql\n      port: {{ tuple \"oslo_db\" \"direct\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    - name: wsrep\n      port: {{ tuple \"oslo_db\" \"direct\" \"wsrep\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    - name: ist\n      port: {{ tuple \"oslo_db\" \"direct\" \"ist\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    - name: sst\n      port: {{ tuple \"oslo_db\" \"direct\" \"sst\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  clusterIP: None\n  publishNotReadyAddresses: false\n  selector:\n{{ tuple $envAll \"mariadb\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{ .Values.network.mariadb_discovery | include \"helm-toolkit.snippets.service_params\" | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/service-master.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_master }}\n\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: mysql\n      port: {{ tuple \"oslo_db\" \"direct\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"mariadb\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{ .Values.network.mariadb_master | include \"helm-toolkit.snippets.service_params\" | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"oslo_db\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: mysql\n      port: {{ tuple \"oslo_db\" \"direct\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"mariadb\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{ .Values.network.mariadb | include \"helm-toolkit.snippets.service_params\" | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/templates/statefulset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"mariadbReadinessProbe\" }}\nexec:\n  command:\n    - /tmp/health.sh\n    - -t\n    - readiness\n    - -d\n    - {{ .Values.pod.probes.server.mariadb.readiness.disk_usage_percent | quote }}\n{{- end }}\n{{- define \"mariadbLivenessProbe\" }}\nexec:\n  command:\n    - /tmp/health.sh\n    - -t\n    - liveness\n{{- end }}\n{{- define \"exporterProbeTemplate\" }}\nhttpGet:\n    path: /metrics\n    port: {{ tuple \"prometheus_mysql_exporter\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.statefulset }}\n{{- $envAll := . 
}}\n\n{{- $serviceAccountName := printf \"%s-%s\" .deployment_name \"mariadb\" }}\n{{ tuple $envAll \"mariadb\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\n  namespace: {{ $envAll.Release.Namespace }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n    verbs:\n      - create\n  - apiGroups:\n      - \"\"\n    resourceNames:\n      - {{ printf \"%s-%s\" .deployment_name \"mariadb-state\" | quote }}\n    resources:\n      - configmaps\n    verbs:\n      - get\n      - patch\n  - apiGroups:\n      - \"\"\n    resourceNames:\n      - {{ tuple \"oslo_db\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n    resources:\n      - endpoints\n    verbs:\n      - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\n  namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  # NOTE(portdirect): the statefulset name must match the POD_NAME_PREFIX env var for discovery to work\n  name: {{ tuple \"oslo_db\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n    configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    mariadb-dbadmin-password-hash: {{ tuple \"secret-dbadmin-password.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n    mariadb-sst-password-hash: {{ tuple \"secret-sst-password.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    configmap-bin-exporter-hash: {{ tuple \"exporter-configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    secrets-etc-exporter-hash: {{ tuple \"exporter-secrets-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n  labels:\n{{ tuple $envAll \"mariadb\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: \"{{ tuple \"oslo_db\" \"discovery\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\"\n  podManagementPolicy: \"Parallel\"\n  replicas: {{ .Values.pod.replicas.server }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"mariadb\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"mariadb\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        mariadb-dbadmin-password-hash: {{ tuple \"secret-dbadmin-password.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        mariadb-sst-password-hash: {{ tuple \"secret-sst-password.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        secrets-etc-exporter-hash: {{ tuple \"exporter-secrets-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"mariadb-server\" \"containerNames\" (list \"init\" \"mariadb-perms\" \"mariadb\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      shareProcessNamespace: true\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"mariadb\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ if $envAll.Values.pod.tolerations.mariadb.enabled }}\n{{ tuple $envAll \"mariadb\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.server.timeout }}\n      nodeSelector:\n        {{ .Values.labels.server.node_selector_key }}: {{ .Values.labels.server.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"mariadb\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n{{- if .Values.volume.chown_on_start }}\n        - name: mariadb-perms\n{{ tuple $envAll \"mariadb\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command: [\"/bin/sh\", \"-c\"]\n          args:\n            - set -xe;\n              /bin/chown -R \"mysql:mysql\" /var/lib/mysql;\n              /bin/chmod 700 /var/lib/mysql;\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: mysql-data\n              mountPath: /var/lib/mysql\n{{- end }}\n    
  containers:\n        - name: mariadb\n{{ tuple $envAll \"mariadb\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"mariadb\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: POD_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            {{- if $envAll.Values.manifests.certificates }}\n            - name: MARIADB_X509\n              value: \"REQUIRE X509\"\n            {{- end }}\n            - name: MARIADB_REPLICAS\n              value: {{ .Values.pod.replicas.server | quote }}\n            - name: POD_NAME_PREFIX\n              value: {{ tuple \"oslo_db\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n            - name: DISCOVERY_DOMAIN\n              value: {{ tuple \"oslo_db\" \"discovery\" . | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n            - name: DIRECT_SVC_NAME\n              value: {{ tuple \"oslo_db\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n            - name: WSREP_PORT\n              value: {{ tuple \"oslo_db\" \"direct\" \"wsrep\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: STATE_CONFIGMAP\n              value: {{ printf \"%s-%s\" .deployment_name \"mariadb-state\" | quote }}\n            - name: MYSQL_DBADMIN_USERNAME\n              value: {{ .Values.endpoints.oslo_db.auth.admin.username }}\n            - name: MYSQL_DBADMIN_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: mariadb-dbadmin-password\n                  key: MYSQL_DBADMIN_PASSWORD\n            - name: MYSQL_DBSST_USERNAME\n              value: {{ .Values.endpoints.oslo_db.auth.sst.username }}\n            - name: MYSQL_DBSST_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: mariadb-dbsst-password\n                  key: MYSQL_DBSST_PASSWORD\n            {{- if .Values.manifests.secret_dbaudit_password }}\n            - name: MYSQL_DBAUDIT_USERNAME\n              value: {{ .Values.endpoints.oslo_db.auth.audit.username }}\n            - name: MYSQL_DBAUDIT_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: mariadb-dbaudit-password\n                  key: MYSQL_DBAUDIT_PASSWORD\n            {{- end }}\n            - name: MYSQL_HISTFILE\n              value: {{ .Values.conf.database.mysql_histfile }}\n            - name: CLUSTER_LEADER_TTL\n              value: {{ .Values.conf.galera.cluster_leader_ttl | quote }}\n          ports:\n            - name: mysql\n              protocol: TCP\n              containerPort: {{ tuple \"oslo_db\" \"direct\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            - name: wsrep\n              protocol: TCP\n              containerPort: {{ tuple \"oslo_db\" \"direct\" \"wsrep\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            - name: ist\n              protocol: TCP\n              containerPort: {{ tuple \"oslo_db\" \"direct\" \"ist\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            - name: sst\n              protocol: TCP\n              containerPort: {{ tuple \"oslo_db\" \"direct\" \"sst\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          command:\n            - /tmp/start.py\n{{ dict \"envAll\" . \"component\" \"server\" \"container\" \"mariadb\" \"type\" \"readiness\" \"probeTemplate\" (include \"mariadbReadinessProbe\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"server\" \"container\" \"mariadb\" \"type\" \"liveness\" \"probeTemplate\" (include \"mariadbLivenessProbe\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: var-run\n              mountPath: /var/run/mysqld\n            - name: mycnfd\n              mountPath: /etc/mysql/conf.d\n            - name: mariadb-bin\n              mountPath: /tmp/start.py\n              subPath: start.py\n              readOnly: true\n            - name: mariadb-bin\n              mountPath: /tmp/stop.sh\n              subPath: stop.sh\n              readOnly: true\n            - name: mariadb-bin\n              mountPath: /tmp/health.sh\n              subPath: health.sh\n              readOnly: true\n            - name: mariadb-etc\n              mountPath: /etc/mysql/my.cnf\n              subPath: my.cnf\n              readOnly: true\n            - name: mariadb-etc\n              mountPath: /etc/mysql/conf.d/00-base.cnf\n              subPath: 00-base.cnf\n              readOnly: true\n            {{- if .Values.conf.database.config_override }}\n            - name: mariadb-etc\n              mountPath: /etc/mysql/conf.d/20-override.cnf\n              subPath: 20-override.cnf\n              readOnly: true\n            {{- end }}\n            - name: mariadb-etc\n              
mountPath: /etc/mysql/conf.d/99-force.cnf\n              subPath: 99-force.cnf\n              readOnly: true\n            - name: mariadb-secrets\n              mountPath: /etc/mysql/admin_user.cnf\n              subPath: admin_user.cnf\n              readOnly: true\n            - name: mysql-data\n              mountPath: /var/lib/mysql\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- if .Values.monitoring.prometheus.enabled }}\n        - name: mysql-exporter\n{{ tuple $envAll \"prometheus_mysql_exporter\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.prometheus_mysql_exporter | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"server\" \"container\" \"mariadb_exporter\" \"type\" \"readiness\" \"probeTemplate\" (include \"exporterProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"server\" \"container\" \"mariadb_exporter\" \"type\" \"liveness\" \"probeTemplate\" (include \"exporterProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/mysqld-exporter.sh\n          ports:\n            - name: metrics\n              containerPort: {{ tuple \"prometheus_mysql_exporter\" \"internal\" \"metrics\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          env:\n            - name: EXPORTER_USER\n              valueFrom:\n                secretKeyRef:\n                  name: mysql-exporter-secrets\n                  key: EXPORTER_USER\n            - name: EXPORTER_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: mysql-exporter-secrets\n                  key: EXPORTER_PASSWORD\n            - name: DATA_SOURCE_NAME\n              valueFrom:\n                secretKeyRef:\n                  name: mysql-exporter-secrets\n                  key: DATA_SOURCE_NAME\n            - name: POD_IP\n              valueFrom:\n                fieldRef:\n                  fieldPath: status.podIP\n            - name: LISTEN_PORT\n              value: {{ tuple \"prometheus_mysql_exporter\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: TELEMETRY_PATH\n              value: {{ tuple \"prometheus_mysql_exporter\" \"internal\" \"metrics\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" | quote }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: mysql-exporter-secrets\n              mountPath: /etc/mysql/mysql_user.cnf\n              subPath: mysql_user.cnf\n              readOnly: true\n            - name: mysql-exporter-bin\n              mountPath: /tmp/mysqld-exporter.sh\n              subPath: mysqld-exporter.sh\n              readOnly: true\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: mycnfd\n          emptyDir: {}\n        - name: var-run\n          emptyDir: {}\n        - name: mariadb-bin\n          configMap:\n            name: mariadb-bin\n            defaultMode: 0555\n        - name: mariadb-etc\n          configMap:\n            name: mariadb-etc\n            defaultMode: 0444\n        - name: mariadb-secrets\n          secret:\n            secretName: mariadb-secrets\n            defaultMode: 0444\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n        {{- if not .Values.volume.enabled }}\n        - name: mysql-data\n        {{- if .Values.volume.use_local_path_for_single_pod_cluster.enabled }}\n          hostPath:\n            path: {{ .Values.volume.use_local_path_for_single_pod_cluster.host_path }}\n            type: DirectoryOrCreate\n        {{- else }}\n          emptyDir: {}\n        {{- end }}\n        {{- end }}\n{{- if .Values.monitoring.prometheus.enabled }}\n        - name: mysql-exporter-secrets\n          secret:\n            secretName: mysql-exporter-secrets\n            defaultMode: 0444\n        - 
name: mysql-exporter-bin\n          configMap:\n            name: mysql-exporter-bin\n            defaultMode: 0555\n{{- end }}\n{{- if .Values.volume.enabled }}\n  volumeClaimTemplates:\n  - metadata:\n      name: mysql-data\n    spec:\n      accessModes: [\"ReadWriteOnce\"]\n      resources:\n        requests:\n          storage: {{ .Values.volume.size }}\n      {{- if ne .Values.volume.class_name \"default\" }}\n      storageClassName: {{ .Values.volume.class_name }}\n      {{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "mariadb/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for mariadb.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nimages:\n  tags:\n    mariadb: quay.io/airshipit/mariadb:latest-ubuntu_noble\n    prometheus_create_mysql_user: quay.io/airshipit/mariadb:11.4.8-noble\n    prometheus_mysql_exporter: docker.io/prom/mysqld-exporter:v0.12.1\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n    mariadb_backup: quay.io/airshipit/porthole-mysqlclient-utility:latest-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    scripted_test: quay.io/airshipit/mariadb:latest-ubuntu_noble\n    mariadb_controller: quay.io/airshipit/mariadb:latest-ubuntu_noble\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  server:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  prometheus_mysql_exporter:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    
node_selector_value: enabled\n  controller:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\npod:\n  env:\n    mariadb_controller:\n      MARIADB_CONTROLLER_DEBUG: 0\n      MARIADB_CONTROLLER_CHECK_PODS_DELAY: 10\n      MARIADB_CONTROLLER_PYKUBE_REQUEST_TIMEOUT: 60\n  probes:\n    server:\n      mariadb:\n        readiness:\n          enabled: true\n          disk_usage_percent: 99\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 30\n            timeoutSeconds: 15\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 120\n            periodSeconds: 30\n            timeoutSeconds: 15\n      mariadb_exporter:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 5\n            periodSeconds: 60\n            timeoutSeconds: 10\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 15\n            periodSeconds: 60\n            timeoutSeconds: 10\n  security_context:\n    server:\n      pod:\n        runAsUser: 999\n      container:\n        perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        mariadb:\n          runAsUser: 999\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    prometheus_mysql_exporter:\n      pod:\n        runAsUser: 99\n      container:\n        exporter:\n          runAsUser: 99\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    prometheus_create_mysql_user:\n      pod:\n        runAsUser: 0\n      container:\n        main:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    mariadb_backup:\n      pod:\n        runAsUser: 65534\n      container:\n        backup_perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        verify_perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n 
       mariadb_backup:\n          runAsUser: 65534\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    tests:\n      pod:\n        runAsUser: 999\n      container:\n        test:\n          runAsUser: 999\n          readOnlyRootFilesystem: true\n    controller:\n      pod:\n        runAsUser: 65534\n      container:\n        controller:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    cluster_wait:\n      pod:\n        runAsUser: 65534\n        runAsNonRoot: true\n      container:\n        mariadb_cluster_wait:\n          allowPrivilegeEscalation: false\n          capabilities:\n            drop:\n              - ALL\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    mariadb:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  replicas:\n    server: 3\n    controller: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    termination_grace_period:\n      server:\n        timeout: 600\n    disruption_budget:\n      mariadb:\n        min_available: 0\n  resources:\n    enabled: false\n    server:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      tests:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n      prometheus_create_mysql_user:\n        limits:\n          memory: 
\"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      mariadb_backup:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - mariadb-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    mariadb_backup:\n      jobs:\n        - mariadb-ks-user\n      services:\n        - endpoint: internal\n          service: oslo_db\n    prometheus_create_mysql_user:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    tests:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    controller:\n      services: null\n    cluster_wait:\n      services:\n        - endpoint: internal\n          service: oslo_db\nvolume:\n  # this value is used for single pod deployments of mariadb to prevent losing all data\n  # if the pod is restarted\n  use_local_path_for_single_pod_cluster:\n    enabled: false\n    host_path: \"/tmp/mysql-data\"\n  chown_on_start: true\n  enabled: true\n  class_name: general\n  size: 5Gi\n  backup:\n    enabled: true\n    class_name: general\n    size: 5Gi\n\njobs:\n  cluster_wait:\n    clusterCheckWait: 30\n    clusterCheckRetries: 30\n    clusterStabilityCount: 30\n    clusterStabilityWait: 4\n  exporter_create_sql_user:\n    backoffLimit: 87600\n    
activeDeadlineSeconds: 3600\n  mariadb_backup:\n    # activeDeadlineSeconds == 0 means no deadline\n    activeDeadlineSeconds: 0\n    backoffLimit: 6\n    cron: \"0 0 * * *\"\n    history:\n      success: 3\n      failed: 1\n  ks_user:\n    # activeDeadlineSeconds == 0 means no deadline\n    activeDeadlineSeconds: 0\n    backoffLimit: 6\n\nconf:\n  tests:\n    # This may either be:\n    # * direct: which will hit the backends directly via a k8s service ip\n    # Note, deadlocks and failure are to be expected with concurrency if\n    # hitting the `direct` endpoint.\n    endpoint: internal\n    # This is a list of tuning params passed to mysqlslap:\n    params:\n      - --auto-generate-sql\n      - --concurrency=100\n      - --number-of-queries=1000\n      - --number-char-cols=1\n      - --number-int-cols=1\n  mariadb_server:\n    setup_wait:\n      iteration: 30\n      duration: 5\n  backup:\n    enabled: false\n    base_path: /var/backup\n    validateData:\n      ageOffset: 120\n    mysqldump_options: >\n      --single-transaction --quick --add-drop-database\n      --add-drop-table --add-locks --databases\n    days_to_keep: 3\n    remote_backup:\n      enabled: false\n      container_name: mariadb\n      days_to_keep: 14\n      storage_policy: default-placement\n      number_of_retries: 5\n      delay_range:\n        min: 30\n        max: 60\n      throttle_backups:\n        enabled: false\n        sessions_limit: 480\n        lock_expire_after: 7200\n        retry_after: 3600\n        container_name: throttle-backups-manager\n      primary_user_class: mariadb\n      failover_user_class: mariadb_failover\n  galera:\n    cluster_leader_ttl: 60\n  database:\n    mysql_histfile: \"/dev/null\"\n    my: |\n      [mysqld]\n      datadir=/var/lib/mysql\n      basedir=/usr\n      ignore-db-dirs=lost+found\n\n      [client-server]\n      !includedir /etc/mysql/conf.d/\n    00_base: |\n      [mysqld]\n      # Charset\n      character_set_server=utf8\n      
collation_server=utf8_general_ci\n      skip-character-set-client-handshake\n\n      # Logging\n      slow_query_log=off\n      slow_query_log_file=/var/log/mysql/mariadb-slow.log\n      log_warnings=2\n\n      # General logging has huge performance penalty therefore is disabled by default\n      general_log=off\n      general_log_file=/var/log/mysql/mariadb-error.log\n\n      long_query_time=3\n      log_queries_not_using_indexes=on\n\n      # Networking\n      bind_address=0.0.0.0\n      port={{ tuple \"oslo_db\" \"direct\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n\n      # When a client connects, the server will perform hostname resolution,\n      # and when DNS is slow, establishing the connection will become slow as well.\n      # It is therefore recommended to start the server with skip-name-resolve to\n      # disable all DNS lookups. The only limitation is that the GRANT statements\n      # must then use IP addresses only.\n      skip_name_resolve\n\n      # Tuning\n      user=mysql\n      max_allowed_packet=256M\n      open_files_limit=10240\n      max_connections=8192\n      max-connect-errors=1000000\n\n      # General security settings\n      # Reference: https://dev.mysql.com/doc/mysql-security-excerpt/8.0/en/general-security-issues.html\n      # secure_file_priv is set to '/home' because it is read-only, which will\n      # disable this feature completely.\n      secure_file_priv=/home\n      local_infile=0\n      symbolic_links=0\n      sql_mode=\"STRICT_ALL_TABLES,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION\"\n\n\n      ## Generally, it is unwise to set the query cache to be larger than 64-128M\n      ## as the costs associated with maintaining the cache outweigh the performance\n      ## gains.\n      ## The query cache is a well known bottleneck that can be seen even when\n      ## concurrency is moderate. 
The best option is to disable it from day 1\n      ## by setting query_cache_size=0 (now the default on MySQL 5.6)\n      ## and to use other ways to speed up read queries: good indexing, adding\n      ## replicas to spread the read load or using an external cache.\n      query_cache_size=0\n      query_cache_type=0\n\n      sync_binlog=0\n      thread_cache_size=16\n      table_open_cache=2048\n      table_definition_cache=1024\n\n      #\n      # InnoDB\n      #\n      # The buffer pool is where data and indexes are cached: having it as large as possible\n      # will ensure you use memory and not disks for most read operations.\n      # Typical values are 50..75% of available RAM.\n      # TODO(tomasz.paszkowski): This needs to by dynamic based on available RAM.\n      innodb_buffer_pool_size=1024M\n      innodb_doublewrite=0\n      innodb_file_format=Barracuda\n      innodb_file_per_table=1\n      innodb_flush_method=O_DIRECT\n      innodb_io_capacity=500\n      innodb_locks_unsafe_for_binlog=1\n      innodb_log_file_size=128M\n      innodb_old_blocks_time=1000\n      innodb_read_io_threads=8\n      innodb_write_io_threads=8\n\n      # Clustering\n      binlog_format=ROW\n      default-storage-engine=InnoDB\n      innodb_autoinc_lock_mode=2\n      innodb_flush_log_at_trx_commit=2\n      wsrep_cluster_name={{ tuple \"oslo_db\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" | replace \".\" \"_\" }}\n      wsrep_on=1\n      wsrep_provider=/usr/lib/galera/libgalera_smm.so\n      wsrep_provider_options=\"evs.suspect_timeout=PT30S; gmcast.peer_timeout=PT15S; gmcast.listen_addr=tcp://0.0.0.0:{{ tuple \"oslo_db\" \"direct\" \"wsrep\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\"\n      wsrep_slave_threads=12\n      wsrep_sst_auth={{ .Values.endpoints.oslo_db.auth.sst.username }}:{{ .Values.endpoints.oslo_db.auth.sst.password }}\n      wsrep_sst_method=mariabackup\n\n      {{ if .Values.manifests.certificates }}\n      # TLS\n      ssl_ca=/etc/mysql/certs/ca.crt\n      ssl_key=/etc/mysql/certs/tls.key\n      ssl_cert=/etc/mysql/certs/tls.crt\n      # tls_version = TLSv1.2,TLSv1.3\n      {{ end }}\n\n\n      [mysqldump]\n      max-allowed-packet=16M\n\n      [client]\n      default_character_set=utf8\n      protocol=tcp\n      port={{ tuple \"oslo_db\" \"direct\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      {{ if .Values.manifests.certificates }}\n      # TLS\n      ssl_ca=/etc/mysql/certs/ca.crt\n      ssl_key=/etc/mysql/certs/tls.key\n      ssl_cert=/etc/mysql/certs/tls.crt\n      # tls_version = TLSv1.2,TLSv1.3\n      ssl-verify-server-cert\n      {{ end }}\n\n    config_override: null\n    # Any configuration here will override the base config.\n    # config_override: |-\n    #   [mysqld]\n    #   wsrep_slave_threads=1\n    99_force: |\n      [mysqld]\n      datadir=/var/lib/mysql\n      tmpdir=/tmp\n\nmonitoring:\n  prometheus:\n    enabled: false\n    mysqld_exporter:\n      scrape: true\n\nsecrets:\n  identity:\n    admin: keystone-admin-user\n    mariadb: mariadb-backup-user\n  mariadb:\n    backup_restore: mariadb-backup-restore\n  oci_image_registry:\n    mariadb: mariadb-oci-image-registry-key\n  tls:\n    oslo_db:\n      server:\n        public: mariadb-tls-server\n        internal: mariadb-tls-direct\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: 
localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      mariadb:\n        username: mariadb\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  monitoring:\n    name: prometheus\n    namespace: null\n    hosts:\n      default: prom-metrics\n      public: prometheus\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 9090\n        public: 80\n  prometheus_mysql_exporter:\n    namespace: null\n    hosts:\n      default: mysql-exporter\n    host_fqdn_override:\n      default: null\n    path:\n      default: /metrics\n    scheme:\n      default: 'http'\n    port:\n      metrics:\n        default: 9104\n  oslo_db:\n    namespace: null\n    auth:\n      admin:\n        username: root\n        password: password\n      sst:\n        username: sst\n        password: password\n      audit:\n        username: audit\n        password: password\n      exporter:\n        username: exporter\n        password: password\n    hosts:\n      default: mariadb\n      direct: mariadb-server\n      discovery: mariadb-discovery\n    host_fqdn_override:\n      default: null\n    path: null\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n      wsrep:\n        default: 4567\n      ist:\n        default: 4568\n      sst:\n        default: 4444\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns_tcp:\n        default: 53\n      dns:\n        default: 53\n        protocol: UDP\n  identity:\n    name: 
backup-storage-auth\n    namespace: openstack\n    auth:\n      admin:\n        # Auth URL of null indicates local authentication\n        # HTK will form the URL unless specified here\n        auth_url: null\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      mariadb:\n        # Auth URL of null indicates local authentication\n        # HTK will form the URL unless specified here\n        auth_url: null\n        role: admin\n        region_name: RegionOne\n        username: mariadb-backup-user\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 80\n        internal: 5000\n\nnetwork:\n  mariadb: {}\n  mariadb_discovery: {}\n  mariadb_master: {}\n\nnetwork_policy:\n  mariadb:\n    ingress:\n      - {}\n    egress:\n      - {}\n  prometheus-mysql-exporter:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  configmap_services_tcp: true\n  job_image_repo_sync: true\n  cron_job_mariadb_backup: false\n  job_ks_user: false\n  pvc_backup: false\n  monitoring:\n    prometheus:\n      configmap_bin: true\n      job_user_create: true\n      secret_etc: true\n  pdb_server: true\n  network_policy: false\n  pod_test: true\n  secret_dbadmin_password: true\n  secret_sst_password: true\n  secret_dbaudit_password: true\n  secret_backup_restore: false\n  secret_etc: true\n  secret_registry: true\n  service_discovery: true\n  service_error: false\n  service: true\n  statefulset: true\n  deployment_controller: true\n  service_master: true\n  job_cluster_wait: 
false\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "mariadb-backup/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v10.6.14\ndescription: OpenStack-Helm MariaDB backups\nname: mariadb-backup\nversion: 2025.2.0\nhome: https://mariadb.com/kb/en/\nicon: http://badges.mariadb.org/mariadb-badge-180x60.png\nsources:\n  - https://github.com/MariaDB/server\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "mariadb-backup/README.rst",
    "content": "openstack-helm/mariadb-backup\n======================\n\nBy default, this chart creates a mariadb-backup cronjob that runs in a schedule\nin order to create mysql backups.\n\nThis chart depends on mariadb-cluster chart.\n\nThe backups are stored in a PVC and also are possible to upload then to a remote\nRGW container.\n\nYou must ensure that your control nodes that should receive mariadb\ninstances are labeled with ``openstack-control-plane=enabled``, or\nwhatever you have configured in values.yaml for the label\nconfiguration:\n\n::\n\n    kubectl label nodes openstack-control-plane=enabled --all\n"
  },
  {
    "path": "mariadb-backup/templates/bin/_backup_mariadb.sh.tpl",
    "content": "#!/bin/bash\n\nSCOPE=${1:-\"all\"}\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nsource /tmp/backup_main.sh\n\n# Export the variables required by the framework\n# Note: REMOTE_BACKUP_ENABLED, STORAGE_POLICY  and CONTAINER_NAME are already\n#       exported.\nexport DB_NAMESPACE=${MARIADB_POD_NAMESPACE}\nexport DB_NAME=\"mariadb\"\nexport LOCAL_DAYS_TO_KEEP=${MARIADB_LOCAL_BACKUP_DAYS_TO_KEEP}\nexport REMOTE_DAYS_TO_KEEP=${MARIADB_REMOTE_BACKUP_DAYS_TO_KEEP}\nexport REMOTE_BACKUP_RETRIES=${NUMBER_OF_RETRIES_SEND_BACKUP_TO_REMOTE}\nexport MIN_DELAY_SEND_REMOTE=${MIN_DELAY_SEND_BACKUP_TO_REMOTE}\nexport MAX_DELAY_SEND_REMOTE=${MAX_DELAY_SEND_BACKUP_TO_REMOTE}\nexport ARCHIVE_DIR=${MARIADB_BACKUP_BASE_DIR}/db/${DB_NAMESPACE}/${DB_NAME}/archive\n\n# Dump all the database files to existing $TMP_DIR and save logs to $LOG_FILE\ndump_databases_to_directory() {\n  TMP_DIR=$1\n  LOG_FILE=$2\n  SCOPE=${3:-\"all\"}\n\n\n  MYSQL=\"mariadb \\\n     --defaults-file=/etc/mysql/admin_user.cnf \\\n     --connect-timeout 10\"\n\n  MYSQLDUMP=\"mariadb-dump \\\n     --defaults-file=/etc/mysql/admin_user.cnf\"\n\n  if [[ \"${SCOPE}\" == \"all\" ]]; then\n    MYSQL_DBNAMES=( $($MYSQL --silent --skip-column-names -e \\\n       \"show databases;\" | \\\n       grep -ivE 'information_schema|performance_schema|mysql|sys') )\n  else\n    if [[ \"${SCOPE}\" != \"information_schema\" && \"${SCOPE}\" != \"performance_schema\" && \"${SCOPE}\" != 
\"mysql\" && \"${SCOPE}\" != \"sys\" ]]; then\n      MYSQL_DBNAMES=( ${SCOPE} )\n    else\n      log ERROR \"It is not allowed to backup database ${SCOPE}.\"\n      return 1\n    fi\n  fi\n\n  #check if there is a database to backup, otherwise exit\n  if [[ -z \"${MYSQL_DBNAMES// }\" ]]\n  then\n    log INFO \"There is no database to backup\"\n    return 0\n  fi\n\n  #Create a list of Databases\n  printf \"%s\\n\" \"${MYSQL_DBNAMES[@]}\" > $TMP_DIR/db.list\n\n  if [[ \"${SCOPE}\" == \"all\" ]]; then\n    #Retrieve and create the GRANT file for all the users\n{{- if .Values.manifests.certificates }}\n    SSL_DSN=\";mysql_ssl=1\"\n    SSL_DSN=\"$SSL_DSN;mysql_ssl_client_key=/etc/mysql/certs/tls.key\"\n    SSL_DSN=\"$SSL_DSN;mysql_ssl_client_cert=/etc/mysql/certs/tls.crt\"\n    SSL_DSN=\"$SSL_DSN;mysql_ssl_ca_file=/etc/mysql/certs/ca.crt\"\n    if ! pt-show-grants --defaults-file=/etc/mysql/admin_user.cnf $SSL_DSN \\\n{{- else }}\n    if ! pt-show-grants --defaults-file=/etc/mysql/admin_user.cnf \\\n{{- end }}\n         2>>\"$LOG_FILE\" > \"$TMP_DIR\"/grants.sql; then\n      log ERROR \"Failed to create GRANT for all the users\"\n      return 1\n    fi\n  fi\n\n  #Retrieve and create the GRANT files per DB\n  for db in \"${MYSQL_DBNAMES[@]}\"\n  do\n    echo $($MYSQL --skip-column-names -e \"select concat('show grants for ',user,';') \\\n          from mysql.db where ucase(db)=ucase('$db');\") | \\\n          sed -r \"s/show grants for ([a-zA-Z0-9_-]*)/show grants for '\\1'/g\" | \\\n          $MYSQL --silent --skip-column-names 2>>$LOG_FILE > $TMP_DIR/${db}_grant.sql\n    if [ \"$?\" -eq 0 ]\n    then\n      sed -i 's/$/;/' $TMP_DIR/${db}_grant.sql\n    else\n      log ERROR \"Failed to create GRANT files for ${db}\"\n      return 1\n    fi\n  done\n\n  #Dumping the database\n\n  SQL_FILE=mariadb.$MARIADB_POD_NAMESPACE.${SCOPE}\n\n  $MYSQLDUMP $MYSQL_BACKUP_MYSQLDUMP_OPTIONS \"${MYSQL_DBNAMES[@]}\"  \\\n            > $TMP_DIR/${SQL_FILE}.sql 2>>$LOG_FILE\n  if [[ $? 
-eq 0 && -s $TMP_DIR/${SQL_FILE}.sql ]]\n  then\n    log INFO \"Database(s) dumped successfully. (SCOPE = ${SCOPE})\"\n    return 0\n  else\n    log ERROR \"Backup failed and need attention. (SCOPE = ${SCOPE})\"\n    return 1\n  fi\n}\n\n# functions from  mariadb-verifier chart\n\nget_time_delta_secs () {\n  second_delta=0\n  input_date_second=$( date --date=\"$1\" +%s )\n  if [ -n \"$input_date_second\" ]; then\n    current_date=$( date +\"%Y-%m-%dT%H:%M:%SZ\" )\n    current_date_second=$( date --date=\"$current_date\" +%s )\n    ((second_delta=current_date_second-input_date_second))\n    if [ \"$second_delta\" -lt 0 ]; then\n      second_delta=0\n    fi\n  fi\n  echo $second_delta\n}\n\n\ncheck_data_freshness () {\n  archive_file=$(basename \"$1\")\n  archive_date=$(echo \"$archive_file\" | cut -d'.' -f 4)\n  SCOPE=$2\n\n  if [[ \"${SCOPE}\" != \"all\" ]]; then\n    log \"Data freshness check is skipped for individual database.\"\n    return 0\n  fi\n\n  log \"Checking for data freshness in the backups...\"\n  # Get some idea of which database.table has changed in the last 30m\n  # Excluding the system DBs and aqua_test_database\n  #\n  changed_tables=$(${MYSQL_LIVE} -e \"select TABLE_SCHEMA,TABLE_NAME from \\\ninformation_schema.tables where UPDATE_TIME >= SUBTIME(now(),'00:30:00') AND TABLE_SCHEMA \\\nNOT IN('information_schema', 'mysql', 'performance_schema', 'sys', 'aqua_test_database');\" | \\\nawk '{print $1 \".\" $2}')\n\n  if [ -n \"${changed_tables}\" ]; then\n    delta_secs=$(get_time_delta_secs \"$archive_date\")\n    age_offset={{ .Values.conf.backup.validateData.ageOffset }}\n    ((age_threshold=delta_secs+age_offset))\n\n    data_freshness=false\n    skipped_freshness=false\n\n    for table in ${changed_tables}; do\n      tab_schema=$(echo \"$table\" | awk -F. '{print $1}')\n      tab_name=$(echo \"$table\" | awk -F. 
'{print $2}')\n\n      local_table_existed=$(${MYSQL_LOCAL_SHORT_SILENT} -e \"select TABLE_SCHEMA,TABLE_NAME from \\\nINFORMATION_SCHEMA.TABLES where TABLE_SCHEMA=\\\"${tab_schema}\\\" AND TABLE_NAME=\\\"${tab_name}\\\";\")\n\n      if [ -n \"$local_table_existed\" ]; then\n        # TODO: If the last updated field of a table structure has different\n        # patterns (updated/timestamp), it may be worth parameterizing the patterns.\n        datetime=$(${MYSQL_LOCAL_SHORT_SILENT} -e \"describe ${table};\" | \\\n                   awk '(/updated/ || /timestamp/) && /datetime/ {print $1}')\n\n        if [ -n \"${datetime}\" ]; then\n          data_ages=$(${MYSQL_LOCAL_SHORT_SILENT} -e \"select \\\ntime_to_sec(timediff(now(),${datetime})) from ${table} where ${datetime} is not null order by 1 limit 10;\")\n\n          for age in $data_ages; do\n            if [ \"$age\" -le $age_threshold ]; then\n              data_freshness=true\n              break\n            fi\n          done\n\n          # As long as there is an indication of data freshness, no need to check further\n          if [ \"$data_freshness\" = true ] ; then\n            break\n          fi\n        else\n          skipped_freshness=true\n          log \"No indicator to determine data freshness for table $table. 
Skipped data freshness check.\"\n\n          # Dumping out table structure to determine if enhancement is needed to include this table\n          debug_info=$(${MYSQL_LOCAL} --skip-column-names -e \"describe ${table};\" | awk '{print $2 \" \" $1}')\n          log \"$debug_info\" \"DEBUG\"\n        fi\n      else\n        log \"Table $table doesn't exist in local database\"\n        skipped_freshness=true\n      fi\n    done\n\n    if [ \"$data_freshness\" = true ] ; then\n      log \"Database passed integrity (data freshness) check.\"\n    else\n      if [ \"$skipped_freshness\" = false ] ; then\n        log \"Local backup database restore failed integrity check.\" \"ERROR\"\n        log \"The backup may not have captured the up-to-date data.\" \"INFO\"\n        return 1\n      fi\n    fi\n  else\n    log \"No tables changed in this backup. Skipped data freshness check as the\"\n    log \"check should have been performed by previous validation runs.\"\n  fi\n\n  return 0\n}\n\n\ncleanup_local_databases () {\n  old_local_dbs=$(${MYSQL_LOCAL_SHORT_SILENT} -e 'show databases;' | \\\n    grep -ivE 'information_schema|performance_schema|mysql|sys' || true)\n\n  for db in $old_local_dbs; do\n    ${MYSQL_LOCAL_SHORT_SILENT} -e \"drop database $db;\"\n  done\n}\n\nlist_archive_dir () {\n  archive_dir_content=$(ls -1R \"$ARCHIVE_DIR\")\n  if [ -n \"$archive_dir_content\" ]; then\n    log \"Content of $ARCHIVE_DIR\"\n    log \"${archive_dir_content}\"\n  fi\n}\n\nremove_remote_archive_file () {\n  archive_file=$(basename \"$1\")\n  token_req_file=$(mktemp --suffix \".json\")\n  header_file=$(mktemp)\n  resp_file=$(mktemp --suffix \".json\")\n  http_resp=\"404\"\n\n  HEADER_CONTENT_TYPE=\"Content-Type: application/json\"\n  HEADER_ACCEPT=\"Accept: application/json\"\n\n  cat << JSON_EOF > \"$token_req_file\"\n{\n    \"auth\": {\n        \"identity\": {\n            \"methods\": [\n                \"password\"\n            ],\n            \"password\": {\n                
\"user\": {\n                    \"domain\": {\n                        \"name\": \"${OS_USER_DOMAIN_NAME}\"\n                    },\n                    \"name\": \"${OS_USERNAME}\",\n                    \"password\": \"${OS_PASSWORD}\"\n                }\n            }\n        },\n        \"scope\": {\n            \"project\": {\n                \"domain\": {\n                    \"name\": \"${OS_PROJECT_DOMAIN_NAME}\"\n                },\n                \"name\": \"${OS_PROJECT_NAME}\"\n            }\n        }\n    }\n}\nJSON_EOF\n\n  http_resp=$(curl -s -X POST \"$OS_AUTH_URL/auth/tokens\"  -H \"${HEADER_CONTENT_TYPE}\" \\\n       -H \"${HEADER_ACCEPT}\" -d @\"${token_req_file}\" -D \"$header_file\" -o \"$resp_file\" -w \"%{http_code}\")\n\n  if [ \"$http_resp\" = \"201\" ]; then\n    OS_TOKEN=$(grep -i \"x-subject-token\" \"$header_file\" | cut -d' ' -f2 | tr -d \"\\r\")\n\n    if [ -n \"$OS_TOKEN\" ]; then\n      OS_OBJ_URL=$(python3 -c \"import json,sys;print([[ep['url'] for ep in obj['endpoints'] if ep['interface']=='public'] for obj in json.load(sys.stdin)['token']['catalog'] if obj['type']=='object-store'][0][0])\" < \"$resp_file\")\n\n      if [ -n \"$OS_OBJ_URL\" ]; then\n        http_resp=$(curl -s -X DELETE \"$OS_OBJ_URL/$CONTAINER_NAME/$archive_file\" \\\n                         -H \"${HEADER_CONTENT_TYPE}\" -H \"${HEADER_ACCEPT}\" \\\n                         -H \"X-Auth-Token: ${OS_TOKEN}\" -D \"$header_file\" -o \"$resp_file\" -w \"%{http_code}\")\n      fi\n    fi\n  fi\n\n  if [ \"$http_resp\" == \"404\" ] ; then\n    log \"Failed to cleanup remote backup. Container object $archive_file is not on RGW.\"\n    return 1\n  fi\n\n  if [ \"$http_resp\" != \"204\" ] ; then\n    log \"Failed to cleanup remote backup. Cannot delete container object $archive_file\" \"ERROR\"\n    cat \"$header_file\"\n    cat \"$resp_file\"\n  fi\n  return 0\n}\n\nhandle_bad_archive_file () {\n  archive_file=$1\n\n  if [ ! 
-d \"$BAD_ARCHIVE_DIR\" ]; then\n    mkdir -p \"$BAD_ARCHIVE_DIR\"\n  fi\n\n  # Move the file to quarantine directory such that\n  # file won't be used for restore in case of recovery\n  #\n  log \"Moving $i to $BAD_ARCHIVE_DIR...\"\n  mv \"$i\" \"$BAD_ARCHIVE_DIR\"\n  log \"Removing $i from remote RGW...\"\n  if remove_remote_archive_file \"$i\"; then\n    log \"File $i has been successfully removed from RGW.\"\n  else\n    log \"FIle $i cannot be removed form RGW.\" \"ERROR\"\n    return 1\n  fi\n\n  # Atmost only three bad files are kept. Deleting the oldest if\n  # number of files exceeded the threshold.\n  #\n  bad_files=$(find \"$BAD_ARCHIVE_DIR\" -name \"*.tar.gz\" 2>/dev/null | wc -l)\n  if [ \"$bad_files\" -gt 3 ]; then\n    ((bad_files=bad_files-3))\n    delete_files=$(find \"$BAD_ARCHIVE_DIR\" -name \"*.tar.gz\" 2>/dev/null | sort | head --lines=$bad_files)\n    for b in $delete_files; do\n      log \"Deleting $b...\"\n      rm -f \"${b}\"\n    done\n  fi\n  return 0\n}\n\ncleanup_old_validation_result_file () {\n  clean_files=$(find \"$ARCHIVE_DIR\" -maxdepth 1 -name \"*.passed\" 2>/dev/null)\n  for d in $clean_files; do\n    archive_file=${d/.passed}\n    if [ ! -f \"$archive_file\" ]; then\n      log \"Deleting $d as its associated archive file $archive_file nolonger existed.\"\n      rm -f \"${d}\"\n    fi\n  done\n}\n\nvalidate_databases_backup () {\n  archive_file=$1\n  SCOPE=${2:-\"all\"}\n\n  restore_log='/tmp/restore_error.log'\n  tmp_dir=$(mktemp -d)\n\n  rm -f $restore_log\n  cd \"$tmp_dir\"\n  log \"Decompressing archive $archive_file...\"\n  if ! tar zxvf - < \"$archive_file\" 1>/dev/null; then\n    log \"Database restore from local backup failed. 
Archive decompression failed.\" \"ERROR\"\n    return 1\n  fi\n\n  db_list_file=\"$tmp_dir/db.list\"\n  if [[ -e \"$db_list_file\" ]]; then\n    dbs=$(sort < \"$db_list_file\" | grep -ivE sys | tr '\\n' ' ')\n  else\n    dbs=\" \"\n  fi\n\n  sql_file=\"${tmp_dir}/mariadb.${MARIADB_POD_NAMESPACE}.${SCOPE}.sql\"\n\n  if [[ \"${SCOPE}\" == \"all\" ]]; then\n    grant_file=\"${tmp_dir}/grants.sql\"\n  else\n    grant_file=\"${tmp_dir}/${SCOPE}_grant.sql\"\n  fi\n\n  if [[ -f $sql_file ]]; then\n    if $MYSQL_LOCAL < \"$sql_file\" 2>$restore_log; then\n      local_dbs=$(${MYSQL_LOCAL_SHORT_SILENT} -e 'show databases;' | \\\n        grep -ivE 'information_schema|performance_schema|mysql|sys' | sort | tr '\\n' ' ')\n\n      if [ \"$dbs\" = \"$local_dbs\" ]; then\n        log \"Databases restored successful.\"\n      else\n        log \"Database restore from local backup failed. Database mismatched between local backup and local server\" \"ERROR\"\n        log \"Databases restored on local server: $local_dbs\" \"DEBUG\"\n        log \"Databases in the local backup: $dbs\" \"DEBUG\"\n        return 1\n      fi\n    else\n      log \"Database restore from local backup failed. $dbs\" \"ERROR\"\n      cat $restore_log\n      return 1\n    fi\n\n    if [[ -f $grant_file ]]; then\n      if $MYSQL_LOCAL < \"$grant_file\" 2>$restore_log; then\n        if ! $MYSQL_LOCAL -e 'flush privileges;'; then\n          log \"Database restore from local backup failed. Failed to flush privileges.\" \"ERROR\"\n          return 1\n        fi\n        log \"Databases permission restored successful.\"\n      else\n        log \"Database restore from local backup failed. Databases permission failed to restore.\" \"ERROR\"\n        cat \"$restore_log\"\n        cat \"$grant_file\"\n        log \"Local DBs: $local_dbs\" \"DEBUG\"\n        return 1\n      fi\n    else\n      log \"Database restore from local backup failed. 
There is no permission file available\" \"ERROR\"\n      return 1\n    fi\n\n    if ! check_data_freshness \"$archive_file\" ${SCOPE}; then\n      # Log has already generated during check data freshness\n      return 1\n    fi\n  else\n    log \"Database restore from local backup failed. There is no database file available to restore from\" \"ERROR\"\n    return 1\n  fi\n\n  return 0\n}\n\n# end of functions form mariadb verifier chart\n\n# Verify all the databases backup archives\nverify_databases_backup_archives() {\n  SCOPE=${1:-\"all\"}\n\n  # verification code\n  export DB_NAME=\"mariadb\"\n  export ARCHIVE_DIR=${MARIADB_BACKUP_BASE_DIR}/db/${MARIADB_POD_NAMESPACE}/${DB_NAME}/archive\n  export BAD_ARCHIVE_DIR=${ARCHIVE_DIR}/quarantine\n  export MYSQL_OPTS=\"--silent --skip-column-names\"\n  export MYSQL_LIVE=\"mariadb ${MYSQL_OPTS}\"\n  export MYSQL_LOCAL_OPTS=\"\"\n  export MYSQL_LOCAL_SHORT=\"mariadb ${MYSQL_LOCAL_OPTS} --connect-timeout 2\"\n  export MYSQL_LOCAL_SHORT_SILENT=\"${MYSQL_LOCAL_SHORT} ${MYSQL_OPTS}\"\n  export MYSQL_LOCAL=\"mariadb ${MYSQL_LOCAL_OPTS} --connect-timeout 10\"\n\n  max_wait={{ .Values.conf.mariadb_server.setup_wait.iteration }}\n  duration={{ .Values.conf.mariadb_server.setup_wait.duration }}\n  counter=0\n  dbisup=false\n\n  log \"Waiting for Mariadb backup verification server to start...\"\n\n  # During Mariadb init/startup process, a temporary server is startup\n  # and shutdown prior to starting up the normal server.\n  # To avoid prematurely determine server availability, lets snooze\n  # a bit to give time for the process to complete prior to issue\n  # mysql commands.\n  #\n\n\n  while [ $counter -lt $max_wait ]; do\n    if ! 
$MYSQL_LOCAL_SHORT -e 'select 1' > /dev/null 2>&1 ; then\n      sleep $duration\n      ((counter=counter+1))\n    else\n      # Lets sleep for an additional duration just in case async\n      # init takes a bit more time to complete.\n      #\n      sleep $duration\n      dbisup=true\n      counter=$max_wait\n    fi\n  done\n\n  if ! $dbisup; then\n    log \"Mariadb backup verification server is not running\" \"ERROR\"\n    return 1\n  fi\n\n  # During Mariadb init process, a test database will be briefly\n  # created and deleted. Adding to the exclusion list for some\n  # edge cases\n  #\n  clean_db=$(${MYSQL_LOCAL_SHORT_SILENT} -e 'show databases;' | \\\n    grep -ivE 'information_schema|performance_schema|mysql|test|sys' || true)\n\n  if [[ -z \"${clean_db// }\" ]]; then\n    log \"Clean Server is up and running\"\n  else\n    cleanup_local_databases\n    log \"Old databases found on the Mariadb backup verification server were cleaned.\"\n    clean_db=$(${MYSQL_LOCAL_SHORT_SILENT} -e 'show databases;' | \\\n      grep -ivE 'information_schema|performance_schema|mysql|test|sys' || true)\n\n    if [[ -z \"${clean_db// }\" ]]; then\n      log \"Clean Server is up and running\"\n    else\n      log \"Cannot clean old databases on verification server.\" \"ERROR\"\n      return 1\n    fi\n    log \"The server is ready for verification.\"\n  fi\n\n  # Starting with 10.4.13, new definer mariadb.sys was added. However, mariadb.sys was deleted\n  # during init mariadb as it was not on the exclusion list. This corrupted the view of mysql.user.\n  # Insert the tuple back to avoid other similar issues with error i.e\n  #   The user specified as a definer ('mariadb.sys'@'localhost') does not exist\n  #\n  # Before insert the tuple mentioned above, we should make sure that the MariaDB version is 10.4.+\n  mariadb_version=$($MYSQL_LOCAL_SHORT -e \"status\" | grep -E '^Server\\s+version:')\n  log \"Current database ${mariadb_version}\"\n  if [[ ! 
-z ${mariadb_version} && -z $(grep '10.2' <<< ${mariadb_version}) ]]; then\n    if [[ -z $(grep 'mariadb.sys' <<< $($MYSQL_LOCAL_SHORT mysql  -e \"select * from global_priv where user='mariadb.sys'\")) ]]; then\n      $MYSQL_LOCAL_SHORT -e \"insert into mysql.global_priv values ('localhost','mariadb.sys',\\\n    '{\\\"access\\\":0,\\\"plugin\\\":\\\"mysql_native_password\\\",\\\"authentication_string\\\":\\\"\\\",\\\"account_locked\\\":true,\\\"password_last_changed\\\":0}');\"\n      $MYSQL_LOCAL_SHORT -e 'flush privileges;'\n    fi\n  fi\n\n  # Ensure archive dir existed\n  if [ -d \"$ARCHIVE_DIR\" ]; then\n    # List archive dir before\n    list_archive_dir\n\n      # Ensure the local databases are clean for each restore validation\n      #\n      cleanup_local_databases\n\n      if [[ \"${SCOPE}\" == \"all\" ]]; then\n        archive_files=$(find \"$ARCHIVE_DIR\" -maxdepth 1 -name \"*.tar.gz\" 2>/dev/null | sort)\n        for i in $archive_files; do\n          archive_file_passed=$i.passed\n          if [ ! -f \"$archive_file_passed\" ]; then\n            log \"Validating archive file $i...\"\n            if validate_databases_backup \"$i\"; then\n              touch \"$archive_file_passed\"\n            else\n              if handle_bad_archive_file \"$i\"; then\n                log \"File $i has been removed from RGW.\"\n              else\n                log \"File $i cannot be removed from RGW.\" \"ERROR\"\n                return 1\n              fi\n            fi\n          fi\n        done\n      else\n        archive_files=$(find \"$ARCHIVE_DIR\" -maxdepth 1 -name \"*.tar.gz\" 2>/dev/null | grep \"${SCOPE}\" | sort)\n        for i in $archive_files; do\n          archive_file_passed=$i.passed\n          if [ ! 
-f \"$archive_file_passed\" ]; then\n            log \"Validating archive file $i...\"\n            if validate_databases_backup \"${i}\" \"${SCOPE}\"; then\n              touch \"$archive_file_passed\"\n            else\n              if handle_bad_archive_file \"$i\"; then\n                log \"File $i has been removed from RGW.\"\n              else\n                log \"File $i cannot be removed from RGW.\" \"ERROR\"\n                return 1\n              fi\n            fi\n          fi\n        done\n      fi\n\n\n    # Cleanup passed files if its archive file nolonger existed\n    cleanup_old_validation_result_file\n\n    # List archive dir after\n    list_archive_dir\n  fi\n\n\n  return 0\n}\n\n# Call main program to start the database backup\nbackup_databases ${SCOPE}\n"
  },
  {
    "path": "mariadb-backup/templates/bin/_restore_mariadb.sh.tpl",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n{{- $envAll := . }}\n\n# Capture the user's command line arguments\nARGS=(\"$@\")\n\nif [[ -s /tmp/restore_main.sh ]]; then\n  source /tmp/restore_main.sh\nelse\n  echo \"File /tmp/restore_main.sh does not exist.\"\n  exit 1\nfi\n\n# Export the variables needed by the framework\nexport DB_NAME=\"mariadb\"\nexport DB_NAMESPACE=${MARIADB_POD_NAMESPACE}\nexport ARCHIVE_DIR=${MARIADB_BACKUP_BASE_DIR}/db/${DB_NAMESPACE}/${DB_NAME}/archive\n\nRESTORE_USER='restoreuser'\nRESTORE_PW=$(pwgen 16 1)\nRESTORE_LOG='/tmp/restore_error.log'\nrm -f $RESTORE_LOG\n\n# This is for commands which require admin access\nMYSQL=\"mariadb \\\n       --defaults-file=/etc/mysql/admin_user.cnf \\\n       --connect-timeout 10\"\n\n# This is for commands which we want the temporary \"restore\" user\n# to execute\nRESTORE_CMD=\"mariadb \\\n             --user=${RESTORE_USER} \\\n             --password=${RESTORE_PW} \\\n             --host={{ tuple \"oslo_db\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }} \\\n             --port={{ tuple \"oslo_db\" \"direct\" \"mysql\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }} \\\n{{- if .Values.manifests.certificates }}\n             --ssl-ca=/etc/mysql/certs/ca.crt \\\n             --ssl-key=/etc/mysql/certs/tls.key \\\n             --ssl-cert=/etc/mysql/certs/tls.crt \\\n{{- end }}\n             --connect-timeout 10\"\n\n# Get a single database data from the SQL file.\n# $1 - database name\n# $2 - sql file path\ncurrent_db_desc() {\n  PATTERN=\"-- Current Database:\"\n  sed -n \"/${PATTERN} \\`$1\\`/,/${PATTERN}/p\" $2\n}\n\n#Return all database from an archive\nget_databases() {\n  TMP_DIR=$1\n  DB_FILE=$2\n\n  if [[ -e ${TMP_DIR}/db.list ]]\n  then\n    DBS=$(cat ${TMP_DIR}/db.list | \\\n              grep -ivE 'information_schema|performance_schema|mysql|sys' )\n  else\n    DBS=\" \"\n  fi\n\n  echo $DBS > $DB_FILE\n}\n\n# Determine sql file from 2 options - current and legacy one\n# if current is not found check that there is no other namespaced dump file\n# before falling back to legacy one\n_get_sql_file() {\n  TMP_DIR=$1\n  SQL_FILE=\"${TMP_DIR}/mariadb.${MARIADB_POD_NAMESPACE}.*.sql\"\n  LEGACY_SQL_FILE=\"${TMP_DIR}/mariadb.*.sql\"\n  INVALID_SQL_FILE=\"${TMP_DIR}/mariadb.*.*.sql\"\n  if [ -f ${SQL_FILE} ]\n  then\n    echo \"Found $(ls ${SQL_FILE})\" > /dev/stderr\n    printf ${SQL_FILE}\n  elif [ -f ${INVALID_SQL_FILE} ]\n  then\n    echo \"Expected to find ${SQL_FILE} or ${LEGACY_SQL_FILE}, but found $(ls ${INVALID_SQL_FILE})\" > /dev/stderr\n  elif [ -f ${LEGACY_SQL_FILE} ]\n  then\n    echo \"Falling back to legacy naming ${LEGACY_SQL_FILE}. Found $(ls ${LEGACY_SQL_FILE})\" > /dev/stderr\n    printf ${LEGACY_SQL_FILE}\n  fi\n}\n\n# Extract all tables of a database from an archive and put them in the requested\n# file.\nget_tables() {\n  DATABASE=$1\n  TMP_DIR=$2\n  TABLE_FILE=$3\n\n  SQL_FILE=$(_get_sql_file $TMP_DIR)\n  if [ ! 
-z $SQL_FILE ]; then\n    current_db_desc ${DATABASE} ${SQL_FILE} \\\n        | grep \"^CREATE TABLE\" | awk -F '`' '{print $2}' \\\n        > $TABLE_FILE\n  else\n    # Error, cannot report the tables\n    echo \"No SQL file found - cannot extract the tables\"\n    return 1\n  fi\n}\n\n# Extract all rows in the given table of a database from an archive and put\n# them in the requested file.\nget_rows() {\n  DATABASE=$1\n  TABLE=$2\n  TMP_DIR=$3\n  ROW_FILE=$4\n\n  SQL_FILE=$(_get_sql_file $TMP_DIR)\n  if [ ! -z $SQL_FILE ]; then\n    current_db_desc ${DATABASE} ${SQL_FILE} \\\n        | grep \"INSERT INTO \\`${TABLE}\\` VALUES\" > $ROW_FILE\n    return 0\n  else\n    # Error, cannot report the rows\n    echo \"No SQL file found - cannot extract the rows\"\n    return 1\n  fi\n}\n\n# Extract the schema for the given table in the given database belonging to\n# the archive file found in the TMP_DIR.\nget_schema() {\n  DATABASE=$1\n  TABLE=$2\n  TMP_DIR=$3\n  SCHEMA_FILE=$4\n\n  SQL_FILE=$(_get_sql_file $TMP_DIR)\n  if [ ! -z $SQL_FILE ]; then\n    DB_FILE=$(mktemp -p /tmp)\n    current_db_desc ${DATABASE} ${SQL_FILE} > ${DB_FILE}\n    sed -n /'CREATE TABLE `'$TABLE'`'/,/'--'/p ${DB_FILE} > ${SCHEMA_FILE}\n    if [[ ! 
(-s ${SCHEMA_FILE}) ]]; then\n      sed -n /'CREATE TABLE IF NOT EXISTS `'$TABLE'`'/,/'--'/p ${DB_FILE} \\\n          > ${SCHEMA_FILE}\n    fi\n    rm -f ${DB_FILE}\n  else\n    # Error, cannot report the rows\n    echo \"No SQL file found - cannot extract the schema\"\n    return 1\n  fi\n}\n\n# Create temporary user for restoring specific databases.\ncreate_restore_user() {\n  restore_db=$1\n\n  # Ensure any old restore user is removed first, if it exists.\n  # If it doesn't exist it may return error, so do not exit the\n  # script if that's the case.\n  delete_restore_user \"dont_exit_on_error\"\n\n  $MYSQL --execute=\"GRANT SELECT ON *.* TO ${RESTORE_USER}@'%' IDENTIFIED BY '${RESTORE_PW}';\" 2>>$RESTORE_LOG\n  if [[ \"$?\" -eq 0 ]]\n  then\n    $MYSQL --execute=\"GRANT ALL ON ${restore_db}.* TO ${RESTORE_USER}@'%' IDENTIFIED BY '${RESTORE_PW}';\" 2>>$RESTORE_LOG\n    if [[ \"$?\" -ne 0 ]]\n    then\n      cat $RESTORE_LOG\n      echo \"Failed to grant restore user ALL permissions on database ${restore_db}\"\n      return 1\n    fi\n  else\n    cat $RESTORE_LOG\n    echo \"Failed to grant restore user select permissions on all databases\"\n    return 1\n  fi\n}\n\n# Delete temporary restore user\ndelete_restore_user() {\n  error_handling=$1\n\n  $MYSQL --execute=\"DROP USER ${RESTORE_USER}@'%';\" 2>>$RESTORE_LOG\n  if [[ \"$?\" -ne 0 ]]\n  then\n    if [ \"$error_handling\" == \"exit_on_error\" ]\n    then\n      cat $RESTORE_LOG\n      echo \"Failed to delete temporary restore user - needs attention to avoid a security hole\"\n      return 1\n    fi\n  fi\n}\n\n#Restore a single database\nrestore_single_db() {\n  SINGLE_DB_NAME=$1\n  TMP_DIR=$2\n\n  if [[ -z \"$SINGLE_DB_NAME\" ]]\n  then\n    echo \"Restore single DB called but with wrong parameter.\"\n    return 1\n  fi\n\n  SQL_FILE=$(_get_sql_file $TMP_DIR)\n  if [ ! 
-z $SQL_FILE ]; then\n    # Restoring a single database requires us to create a temporary user\n    # which has capability to only restore that ONE database. One gotcha\n    # is that the mysql command to restore the database is going to throw\n    # errors because of all the other databases that it cannot access. So\n    # because of this reason, the --force option is used to prevent the\n    # command from stopping on an error.\n    create_restore_user $SINGLE_DB_NAME\n    if [[ $? -ne 0 ]]\n    then\n      echo \"Restore $SINGLE_DB_NAME failed create restore user.\"\n      return 1\n    fi\n    $RESTORE_CMD --force < $SQL_FILE 2>>$RESTORE_LOG\n    if [[ \"$?\" -eq 0 ]]\n    then\n      echo \"Database $SINGLE_DB_NAME Restore successful.\"\n    else\n      cat $RESTORE_LOG\n      delete_restore_user \"exit_on_error\"\n      echo \"Database $SINGLE_DB_NAME Restore failed.\"\n      return 1\n    fi\n    delete_restore_user \"exit_on_error\"\n    if [[ $? -ne 0 ]]\n    then\n      echo \"Restore $SINGLE_DB_NAME failed delete restore user.\"\n      return 1\n    fi\n    if [ -f ${TMP_DIR}/${SINGLE_DB_NAME}_grant.sql ]\n    then\n      $MYSQL < ${TMP_DIR}/${SINGLE_DB_NAME}_grant.sql 2>>$RESTORE_LOG\n      if [[ \"$?\" -eq 0 ]]\n      then\n        if ! $MYSQL --execute=\"FLUSH PRIVILEGES;\"; then\n          echo \"Failed to flush privileges for $SINGLE_DB_NAME.\"\n          return 1\n        fi\n        echo \"Database $SINGLE_DB_NAME Permission Restore successful.\"\n      else\n        cat $RESTORE_LOG\n        echo \"Database $SINGLE_DB_NAME Permission Restore failed.\"\n        return 1\n      fi\n    else\n      echo \"There is no permission file available for $SINGLE_DB_NAME\"\n      return 1\n    fi\n  else\n    echo \"There is no database file available to restore from\"\n    return 1\n  fi\n  return 0\n}\n\n#Restore all the databases\nrestore_all_dbs() {\n  TMP_DIR=$1\n\n  SQL_FILE=$(_get_sql_file $TMP_DIR)\n  if [ ! 
-z $SQL_FILE ]; then\n    # Check the scope of the archive.\n    SCOPE=$(echo ${SQL_FILE} | awk -F'.' '{print $(NF-1)}')\n    if [[ \"${SCOPE}\" != \"all\" ]]; then\n      # This is just a single database backup. The user should\n      # instead use the single database restore option.\n      echo \"Cannot use the restore all option for an archive containing only a single database.\"\n      echo \"Please use the single database restore option.\"\n      return 1\n    fi\n\n    $MYSQL < $SQL_FILE 2>$RESTORE_LOG\n    if [[ \"$?\" -eq 0 ]]\n    then\n      echo \"Databases $( echo $DBS | tr -d '\\n') Restore successful.\"\n    else\n      cat $RESTORE_LOG\n      echo \"Databases $( echo $DBS | tr -d '\\n') Restore failed.\"\n      return 1\n    fi\n    if [[ -f ${TMP_DIR}/grants.sql ]]\n    then\n      $MYSQL < ${TMP_DIR}/grants.sql 2>$RESTORE_LOG\n      if [[ \"$?\" -eq 0 ]]\n      then\n        if ! $MYSQL --execute=\"FLUSH PRIVILEGES;\"; then\n          echo \"Failed to flush privileges.\"\n          return 1\n        fi\n        echo \"Databases Permission Restore successful.\"\n      else\n        cat $RESTORE_LOG\n        echo \"Databases Permission Restore failed.\"\n        return 1\n      fi\n    else\n      echo \"There is no permission file available\"\n      return 1\n    fi\n  else\n    echo \"There is no database file available to restore from\"\n    return 1\n  fi\n  return 0\n}\n\n# Call the CLI interpreter, providing the archive directory path and the\n# user arguments passed in\ncli_main ${ARGS[@]}\n"
  },
  {
    "path": "mariadb-backup/templates/bin/_start_mariadb_verify_server.sh.tpl",
    "content": "#!/bin/bash -ex\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nlog () {\n  msg_default=\"Need some text to log\"\n  level_default=\"INFO\"\n  component_default=\"Mariadb Backup Verifier\"\n\n  msg=${1:-$msg_default}\n  level=${2:-$level_default}\n  component=${3:-\"$component_default\"}\n\n  echo \"$(date +'%Y-%m-%d %H:%M:%S,%3N') - ${component} - ${level} - ${msg}\"\n}\n\nlog \"Starting Mariadb server for backup verification...\"\nmysql_install_db --user=nobody --ldata=/var/lib/mysql >/dev/null 2>&1\nMYSQL_ALLOW_EMPTY_PASSWORD=1 mysqld --user=nobody --verbose >/dev/null 2>&1\n"
  },
  {
    "path": "mariadb-backup/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{ if eq .Values.endpoints.oslo_db.auth.admin.username .Values.endpoints.oslo_db.auth.sst.username }}\n{{ fail \"the DB admin username should not match the sst user username\" }}\n{{ end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: mariadb-backup-bin\ndata:\n  backup_mariadb.sh: |\n{{ tuple \"bin/_backup_mariadb.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  start_verification_server.sh: |\n{{ tuple \"bin/_start_mariadb_verify_server.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  restore_mariadb.sh: |\n{{ tuple \"bin/_restore_mariadb.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  backup_main.sh: |\n{{ include \"helm-toolkit.scripts.db-backup-restore.backup_main\" . | indent 4 }}\n  restore_main.sh: |\n{{ include \"helm-toolkit.scripts.db-backup-restore.restore_main\" . | indent 4 }}\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.manifests.job_ks_user }}\n  ks-user.sh: |\n{{ include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n{{- end }}\n{{- end }}\n...\n"
  },
  {
    "path": "mariadb-backup/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\" );\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: mariadb-backup-etc\ndata:\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" ( index $envAll.Values.conf.database \"my\" ) \"key\" \"my.cnf\" ) | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-backup/templates/cron-job-backup-mariadb.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_job_mariadb_backup }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"mariadb-backup\" }}\n{{- $failoverUserClass := .Values.conf.backup.remote_backup.failover_user_class }}\n{{ tuple $envAll \"mariadb_backup\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: mariadb-backup\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"mariadb-backup\" \"backup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  schedule: {{ .Values.jobs.mariadb_backup.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.mariadb_backup.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.mariadb_backup.history.failed }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"mariadb-backup\" \"backup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"mariadb-backup\" \"containerNames\" (list \"init\" \"backup-perms\" \"mariadb-backup\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{- if .Values.jobs.mariadb_backup.backoffLimit }}\n      backoffLimit: {{ 
.Values.jobs.mariadb_backup.backoffLimit }}\n{{- end }}\n{{- if .Values.jobs.mariadb_backup.activeDeadlineSeconds }}\n      activeDeadlineSeconds: {{ .Values.jobs.mariadb_backup.activeDeadlineSeconds }}\n{{- end }}\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"mariadb-backup\" \"backup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n{{ dict \"envAll\" $envAll \"application\" \"mariadb_backup\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n          serviceAccountName: {{ $serviceAccountName }}\n          restartPolicy: OnFailure\n          shareProcessNamespace: true\n{{- if $envAll.Values.pod.tolerations.mariadb.enabled }}\n{{ tuple $envAll \"mariadb\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{- end }}\n{{- if $envAll.Values.pod.affinity }}\n{{- if $envAll.Values.pod.affinity.mariadb_backup }}\n          affinity:\n{{  index $envAll.Values.pod.affinity \"mariadb_backup\"  | toYaml | indent 12}}\n{{- end }}\n{{- end }}\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n          initContainers:\n{{ tuple $envAll \"mariadb_backup\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n            - name: backup-perms\n{{ tuple $envAll \"mariadb_backup\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.mariadb_backup | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"mariadb_backup\" \"container\" \"backup_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              command:\n                - chown\n                - -R\n                - \"65534:65534\"\n                - $(MARIADB_BACKUP_BASE_DIR)\n              env:\n          
      - name: MARIADB_BACKUP_BASE_DIR\n                  value: {{ .Values.conf.backup.base_path | quote }}\n              volumeMounts:\n                - mountPath: /tmp\n                  name: pod-tmp\n                - mountPath: {{ .Values.conf.backup.base_path }}\n                  name: mariadb-backup-dir\n            - name: verify-perms\n{{ tuple $envAll \"mariadb_backup\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.mariadb_backup | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"mariadb_backup\" \"container\" \"verify_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              command:\n                - chown\n                - -R\n                - \"65534:65534\"\n                - /var/lib/mysql\n              volumeMounts:\n                - mountPath: /tmp\n                  name: pod-tmp\n                - mountPath: /var/lib/mysql\n                  name: mysql-data\n          containers:\n            - name: mariadb-backup\n              command:\n                - /bin/sh\n              args:\n                - -c\n                - >-\n                    ( /tmp/start_verification_server.sh ) &\n                    /tmp/backup_mariadb.sh\n              env:\n                - name: MARIADB_BACKUP_BASE_DIR\n                  value: {{ .Values.conf.backup.base_path | quote }}\n                - name: MYSQL_BACKUP_MYSQLDUMP_OPTIONS\n                  value: {{ .Values.conf.backup.mysqldump_options | quote }}\n                - name: MARIADB_LOCAL_BACKUP_DAYS_TO_KEEP\n                  value: {{ .Values.conf.backup.days_to_keep | quote }}\n                - name: MARIADB_POD_NAMESPACE\n                  valueFrom:\n                    fieldRef:\n                      fieldPath: metadata.namespace\n                - name: REMOTE_BACKUP_ENABLED\n                  
value: \"{{ .Values.conf.backup.remote_backup.enabled }}\"\n{{- if .Values.conf.backup.remote_backup.enabled }}\n                - name: MARIADB_REMOTE_BACKUP_DAYS_TO_KEEP\n                  value: {{ .Values.conf.backup.remote_backup.days_to_keep | quote }}\n                - name: CONTAINER_NAME\n                  value: {{ .Values.conf.backup.remote_backup.container_name | quote }}\n                - name: STORAGE_POLICY\n                  value: \"{{ .Values.conf.backup.remote_backup.storage_policy }}\"\n                - name: NUMBER_OF_RETRIES_SEND_BACKUP_TO_REMOTE\n                  value: {{ .Values.conf.backup.remote_backup.number_of_retries | quote }}\n                - name: MIN_DELAY_SEND_BACKUP_TO_REMOTE\n                  value: {{ .Values.conf.backup.remote_backup.delay_range.min | quote }}\n                - name: MAX_DELAY_SEND_BACKUP_TO_REMOTE\n                  value: {{ .Values.conf.backup.remote_backup.delay_range.max | quote }}\n                - name: THROTTLE_BACKUPS_ENABLED\n                  value: \"{{ .Values.conf.backup.remote_backup.throttle_backups.enabled }}\"\n                - name: THROTTLE_LIMIT\n                  value: {{ .Values.conf.backup.remote_backup.throttle_backups.sessions_limit | quote }}\n                - name: THROTTLE_LOCK_EXPIRE_AFTER\n                  value: {{ .Values.conf.backup.remote_backup.throttle_backups.lock_expire_after | quote }}\n                - name: THROTTLE_RETRY_AFTER\n                  value: {{ .Values.conf.backup.remote_backup.throttle_backups.retry_after | quote }}\n                - name: THROTTLE_CONTAINER_NAME\n                  value: {{ .Values.conf.backup.remote_backup.throttle_backups.container_name | quote }}\n{{- with $env := dict \"ksUserSecret\" (index $envAll.Values.secrets.identity \"mariadb-server\") }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 16 }}\n{{- $failoverIdentityClass := index $envAll.Values.endpoints.identity.auth $failoverUserClass }}\n{{- if 
$failoverIdentityClass }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_failover_env_vars\" $env | indent 16 }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{ tuple $envAll \"mariadb_backup\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.mariadb_backup | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"mariadb_backup\" \"container\" \"mariadb_backup\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - mountPath: /tmp/backup_mariadb.sh\n                  name: mariadb-backup-bin\n                  readOnly: true\n                  subPath: backup_mariadb.sh\n                - mountPath: /tmp/backup_main.sh\n                  name: mariadb-backup-bin\n                  readOnly: true\n                  subPath: backup_main.sh\n                - mountPath: {{ .Values.conf.backup.base_path }}\n                  name: mariadb-backup-dir\n                - name: mariadb-backup-secrets\n                  mountPath: /etc/mysql/admin_user.cnf\n                  subPath: admin_user.cnf\n                  readOnly: true\n                - name: mariadb-backup-bin\n                  mountPath: /tmp/start_verification_server.sh\n                  readOnly: true\n                  subPath: start_verification_server.sh\n                - name: mysql-data\n                  mountPath: /var/lib/mysql\n                - name: var-run\n                  mountPath: /run/mysqld\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: mycnfd\n             
 emptyDir: {}\n            - name: var-run\n              emptyDir: {}\n            - name: mariadb-backup-etc\n              configMap:\n                name: mariadb-backup-etc\n                defaultMode: 0444\n            - name: mysql-data\n              emptyDir: {}\n            - name: mariadb-backup-secrets\n              secret:\n                secretName: mariadb-backup-secrets\n                defaultMode: 420\n            - configMap:\n                defaultMode: 365\n                name: mariadb-backup-bin\n              name: mariadb-backup-bin\n            {{- if and .Values.volume.backup.enabled  .Values.manifests.pvc_backup }}\n            - name: mariadb-backup-dir\n              persistentVolumeClaim:\n                claimName: mariadb-backup-data\n            {{- else }}\n            - hostPath:\n                path: {{ .Values.conf.backup.base_path }}\n                type: DirectoryOrCreate\n              name: mariadb-backup-dir\n            {{- end }}\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-backup/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "mariadb-backup/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $serviceName := tuple \"oslo_db\" \"server\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" $serviceName -}}\n{{- if .Values.pod.tolerations.mariadb.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-backup/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $backoffLimit := .Values.jobs.ks_user.backoffLimit }}\n{{- $activeDeadlineSeconds := .Values.jobs.ks_user.activeDeadlineSeconds }}\n{{- $serviceName := tuple \"oslo_db\" \"server\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" $serviceName \"configMapBin\" \"mariadb-backup-bin\" \"backoffLimit\" $backoffLimit \"activeDeadlineSeconds\" $activeDeadlineSeconds -}}\n{{- if .Values.pod.tolerations.mariadb.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-backup/templates/mariadb-backup-pvc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.volume.backup.enabled .Values.manifests.pvc_backup }}\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: mariadb-backup-data\nspec:\n  accessModes: [\"ReadWriteOnce\"]\n  resources:\n    requests:\n      storage: {{ .Values.volume.backup.size }}\n  storageClassName: {{ .Values.volume.backup.class_name }}\n...\n{{- end }}\n\n"
  },
  {
    "path": "mariadb-backup/templates/secret-backup-restore.yaml",
    "content": "{{/*\nThis manifest results in a secret being created which has the key information\nneeded for backing up and restoring the Mariadb databases.\n*/}}\n\n{{- if and .Values.conf.backup.enabled .Values.manifests.secret_backup_restore }}\n\n{{- $envAll := . }}\n{{- $userClass := \"backup_restore\" }}\n{{- $secretName := index $envAll.Values.secrets.mariadb $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  BACKUP_ENABLED: {{ $envAll.Values.conf.backup.enabled | quote | b64enc }}\n  BACKUP_BASE_PATH: {{ $envAll.Values.conf.backup.base_path | b64enc }}\n  LOCAL_DAYS_TO_KEEP: {{ $envAll.Values.conf.backup.days_to_keep | quote | b64enc }}\n  MYSQLDUMP_OPTIONS: {{ $envAll.Values.conf.backup.mysqldump_options | b64enc }}\n  REMOTE_BACKUP_ENABLED: {{ $envAll.Values.conf.backup.remote_backup.enabled | quote | b64enc }}\n  REMOTE_BACKUP_CONTAINER: {{ $envAll.Values.conf.backup.remote_backup.container_name | b64enc }}\n  REMOTE_BACKUP_DAYS_TO_KEEP: {{ $envAll.Values.conf.backup.remote_backup.days_to_keep | quote | b64enc }}\n  REMOTE_BACKUP_STORAGE_POLICY: {{ $envAll.Values.conf.backup.remote_backup.storage_policy | b64enc }}\n  REMOTE_BACKUP_RETRIES: {{ $envAll.Values.conf.backup.remote_backup.number_of_retries | quote | b64enc }}\n  REMOTE_BACKUP_SEND_DELAY_MIN: {{ $envAll.Values.conf.backup.remote_backup.delay_range.min | quote | b64enc }}\n  REMOTE_BACKUP_SEND_DELAY_MAX: {{ $envAll.Values.conf.backup.remote_backup.delay_range.max | quote | b64enc }}\n  THROTTLE_BACKUPS_ENABLED: {{ $envAll.Values.conf.backup.remote_backup.throttle_backups.enabled | quote | b64enc }}\n  THROTTLE_LIMIT: {{ $envAll.Values.conf.backup.remote_backup.throttle_backups.sessions_limit | quote | b64enc }}\n  THROTTLE_LOCK_EXPIRE_AFTER: {{ $envAll.Values.conf.backup.remote_backup.throttle_backups.lock_expire_after | quote | b64enc }}\n  THROTTLE_RETRY_AFTER: {{ $envAll.Values.conf.backup.remote_backup.throttle_backups.retry_after 
| quote | b64enc }}\n  THROTTLE_CONTAINER_NAME: {{ $envAll.Values.conf.backup.remote_backup.throttle_backups.container_name | quote | b64enc }}\n...\n{{- end }}\n"
  },
  {
    "path": "mariadb-backup/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-backup/templates/secret-rgw.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\nThis manifest results in two secrets being created:\n  1) Keystone \"mariadb\" secret, which is needed to access the cluster\n     (remote or same cluster) for storing mariadb backups. If the\n     cluster is remote, the auth_url would be non-null.\n  2) Keystone \"admin\" secret, which is needed to create the\n     \"mariadb\" keystone account mentioned above. This may not\n     be needed if the account is in a remote cluster (auth_url is non-null\n     in that case).\n*/}}\n\n{{- if .Values.conf.backup.remote_backup.enabled }}\n\n{{- $envAll := . 
}}\n{{- $userClass := .Values.conf.backup.remote_backup.primary_user_class }}\n{{- $failoverUserClass := .Values.conf.backup.remote_backup.failover_user_class }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- $identityClass := index .Values.endpoints.identity.auth $userClass }}\n{{- if $identityClass.auth_url }}\n  OS_AUTH_URL: {{ $identityClass.auth_url | b64enc }}\n{{- else }}\n  OS_AUTH_URL: {{ tuple \"identity\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n  OS_REGION_NAME: {{ $identityClass.region_name | b64enc }}\n  OS_INTERFACE: {{ $identityClass.interface | default \"internal\" | b64enc }}\n  OS_PROJECT_DOMAIN_NAME: {{ $identityClass.project_domain_name | b64enc }}\n  OS_PROJECT_NAME: {{ $identityClass.project_name | b64enc }}\n  OS_USER_DOMAIN_NAME: {{ $identityClass.user_domain_name | b64enc }}\n  OS_USERNAME: {{ $identityClass.username | b64enc }}\n  OS_PASSWORD: {{ $identityClass.password | b64enc }}\n  OS_DEFAULT_DOMAIN: {{ $identityClass.default_domain_id | default \"default\" | b64enc }}\n\n{{- $failoverIdentityClass := index .Values.endpoints.identity.auth $failoverUserClass }}\n{{- if $failoverIdentityClass }}\n{{- if $failoverIdentityClass.auth_url }}\n  OS_AUTH_URL_FAILOVER: {{ $failoverIdentityClass.auth_url | b64enc }}\n{{- else }}\n  OS_AUTH_URL_FAILOVER: {{ tuple \"identity\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n  OS_REGION_NAME_FAILOVER: {{ $failoverIdentityClass.region_name | b64enc }}\n  OS_INTERFACE_FAILOVER: {{ $failoverIdentityClass.interface | default \"internal\" | b64enc }}\n  OS_PROJECT_DOMAIN_NAME_FAILOVER: {{ $failoverIdentityClass.project_domain_name | b64enc }}\n  OS_PROJECT_NAME_FAILOVER: {{ $failoverIdentityClass.project_name | b64enc }}\n  
OS_USER_DOMAIN_NAME_FAILOVER: {{ $failoverIdentityClass.user_domain_name | b64enc }}\n  OS_USERNAME_FAILOVER: {{ $failoverIdentityClass.username | b64enc }}\n  OS_PASSWORD_FAILOVER: {{ $failoverIdentityClass.password | b64enc }}\n  OS_DEFAULT_DOMAIN_FAILOVER: {{ $failoverIdentityClass.default_domain_id | default \"default\" | b64enc }}\n{{- end }}\n...\n{{- if .Values.manifests.job_ks_user }}\n{{- $userClass := \"admin\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- $identityClass := index .Values.endpoints.identity.auth $userClass }}\n{{- if $identityClass.auth_url }}\n  OS_AUTH_URL: {{ $identityClass.auth_url | b64enc }}\n{{- else }}\n  OS_AUTH_URL: {{ tuple \"identity\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n  OS_REGION_NAME: {{ $identityClass.region_name | b64enc }}\n  OS_INTERFACE: {{ $identityClass.interface | default \"internal\" | b64enc }}\n  OS_PROJECT_DOMAIN_NAME: {{ $identityClass.project_domain_name | b64enc }}\n  OS_PROJECT_NAME: {{ $identityClass.project_name | b64enc }}\n  OS_USER_DOMAIN_NAME: {{ $identityClass.user_domain_name | b64enc }}\n  OS_USERNAME: {{ $identityClass.username | b64enc }}\n  OS_PASSWORD: {{ $identityClass.password | b64enc }}\n  OS_DEFAULT_DOMAIN: {{ $identityClass.default_domain_id | default \"default\" | b64enc }}\n...\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-backup/templates/secrets/_admin_user.cnf.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n[client]\nuser = {{ .Values.endpoints.oslo_db.auth.admin.username }}\npassword = {{ .Values.endpoints.oslo_db.auth.admin.password }}\nhost = {{ tuple \"oslo_db\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nport = {{ tuple \"oslo_db\" \"direct\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- if .Values.manifests.certificates }}\nssl-ca = /etc/mysql/certs/ca.crt\nssl-key = /etc/mysql/certs/tls.key\nssl-cert = /etc/mysql/certs/tls.crt\n{{- end }}\n"
  },
  {
    "path": "mariadb-backup/templates/secrets/_admin_user_internal.cnf.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n[client]\nuser = {{ .Values.endpoints.oslo_db.auth.admin.username }}\npassword = {{ .Values.endpoints.oslo_db.auth.admin.password }}\nhost = {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\nport = {{ tuple \"oslo_db\" \"internal\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- if .Values.manifests.certificates }}\nssl-ca = /etc/mysql/certs/ca.crt\nssl-key = /etc/mysql/certs/tls.key\nssl-cert = /etc/mysql/certs/tls.crt\n{{- end }}\n"
  },
  {
    "path": "mariadb-backup/templates/secrets-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: mariadb-backup-secrets\ntype: Opaque\ndata:\n  admin_user.cnf: {{ tuple \"secrets/_admin_user.cnf.tpl\" . | include \"helm-toolkit.utils.template\"  | b64enc }}\n  admin_user_internal.cnf: {{ tuple \"secrets/_admin_user_internal.cnf.tpl\" . | include \"helm-toolkit.utils.template\"  | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-backup/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for mariadb.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nimages:\n  tags:\n    mariadb: quay.io/airshipit/mariadb:latest-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble\n    mariadb_backup: quay.io/airshipit/porthole-mysqlclient-utility:latest-ubuntu_noble\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n\nlabels:\n  server:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\npod:\n  security_context:\n    server:\n      pod:\n        runAsUser: 999\n      container:\n        perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        init:\n          runAsUser: 0\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: false\n        agent:\n          runAsUser: 0\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: false\n        mariadb:\n          runAsUser: 0\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: false\n    mariadb_backup:\n      pod:\n        runAsUser: 65534\n      container:\n        
backup_perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        verify_perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        mariadb_backup:\n          runAsUser: 65534\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    tests:\n      pod:\n        runAsUser: 999\n      container:\n        test:\n          runAsUser: 999\n          readOnlyRootFilesystem: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    mariadb:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  replicas:\n    server: 3\n    prometheus_mysql_exporter: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    termination_grace_period:\n      prometheus_mysql_exporter:\n        timeout: 30\n      error_pages:\n        timeout: 10\n    disruption_budget:\n      mariadb:\n        min_available: 0\n  resources:\n    enabled: false\n    server:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      tests:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      mariadb_backup:\n        requests:\n          memory: \"128Mi\"\n          cpu: 
\"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - mariadb-server-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    mariadb_server_ks_user:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    mariadb_backup:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    tests:\n      services:\n        - endpoint: internal\n          service: oslo_db\n\nvolume:\n  backup:\n    enabled: true\n    class_name: general\n    size: 5Gi\n\njobs:\n  mariadb_backup:\n    # activeDeadlineSeconds == 0 means no deadline\n    activeDeadlineSeconds: 0\n    backoffLimit: 6\n    cron: \"0 0 * * *\"\n    history:\n      success: 3\n      failed: 1\n  ks_user:\n    # activeDeadlineSeconds == 0 means no deadline\n    activeDeadlineSeconds: 0\n    backoffLimit: 6\n\nconf:\n  mariadb_server:\n    setup_wait:\n      iteration: 30\n      duration: 5\n  database:\n    my: |\n      [mysqld]\n      datadir=/var/lib/mysql\n      basedir=/usr\n      ignore-db-dirs=lost+found\n\n      [client-server]\n      !includedir /etc/mysql/conf.d/\n  backup:\n    enabled: false\n    base_path: /var/backup\n    validateData:\n      ageOffset: 120\n    mysqldump_options: >\n      --single-transaction --quick --add-drop-database\n      --add-drop-table --add-locks --databases\n    days_to_keep: 3\n    remote_backup:\n      enabled: false\n      container_name: mariadb\n      days_to_keep: 14\n      storage_policy: default-placement\n      number_of_retries: 5\n      delay_range:\n        min: 30\n 
       max: 60\n      throttle_backups:\n        enabled: false\n        sessions_limit: 480\n        lock_expire_after: 7200\n        retry_after: 3600\n        container_name: throttle-backups-manager\n      primary_user_class: mariadb-server\n      failover_user_class: mariadb-server_failover\n\nsecrets:\n  identity:\n    admin: keystone-admin-user\n    mariadb-server: mariadb-backup-user\n  mariadb:\n    backup_restore: mariadb-backup-restore\n  oci_image_registry:\n    mariadb: mariadb-oci-image-registry-key\n  tls:\n    oslo_db:\n      server:\n        public: mariadb-tls-server\n        internal: mariadb-tls-direct\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      mariadb:\n        username: mariadb\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  oslo_db:\n    namespace: null\n    auth:\n      admin:\n        username: root\n        password: password\n      sst:\n        username: sst\n        password: password\n      audit:\n        username: audit\n        password: password\n      exporter:\n        username: exporter\n        password: password\n    hosts:\n      default: mariadb-server-primary\n      direct: mariadb-server-internal\n      discovery: mariadb-discovery\n      server: mariadb-server\n    host_fqdn_override:\n      default: null\n    path: null\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 
3306\n      wsrep:\n        default: 4567\n  identity:\n    name: backup-storage-auth\n    namespace: openstack\n    auth:\n      admin:\n        # Auth URL of null indicates local authentication\n        # HTK will form the URL unless specified here\n        auth_url: null\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      mariadb-server:\n        # Auth URL of null indicates local authentication\n        # HTK will form the URL unless specified here\n        auth_url: null\n        role: admin\n        region_name: RegionOne\n        username: mariadb-backup-user\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 80\n        internal: 5000\n\nnetwork_policy:\n  mariadb:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  job_ks_user: false\n  cron_job_mariadb_backup: true\n  pvc_backup: true\n  network_policy: false\n  pod_test: true\n  secret_dbadmin_password: true\n  secret_sst_password: true\n  secret_dbaudit_password: true\n  secret_backup_restore: true\n  secret_etc: true\n\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #      
           objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "mariadb-cluster/.helmignore",
    "content": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation (prefixed with !). Only one pattern per line.\n.DS_Store\n# Common VCS dirs\n.git/\n.gitignore\n.bzr/\n.bzrignore\n.hg/\n.hgignore\n.svn/\n# Common backup files\n*.swp\n*.bak\n*.tmp\n*~\n# Various IDEs\n.project\n.idea/\n*.tmproj\n"
  },
  {
    "path": "mariadb-cluster/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v10.6.14\ndescription: OpenStack-Helm MariaDB controlled by mariadb-operator\nname: mariadb-cluster\nversion: 2025.2.0\nhome: https://mariadb.com/kb/en/\nicon: http://badges.mariadb.org/mariadb-badge-180x60.png\nsources:\n  - https://github.com/MariaDB/server\n  - https://github.com/mariadb-operator/mariadb-operator\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "mariadb-cluster/README.rst",
    "content": "openstack-helm/mariadb\n======================\n\nBy default, this chart creates a 3-member mariadb galera cluster.\n\nThis chart depends on the mariadb-operator chart.\n\nThe StatefulSets all leverage PVCs to provide stateful storage to\n``/var/lib/mysql``.\n\nYou must ensure that your control nodes that should receive mariadb\ninstances are labeled with ``openstack-control-plane=enabled``, or\nwhatever you have configured in values.yaml for the label\nconfiguration:\n\n::\n\n    kubectl label nodes openstack-control-plane=enabled --all\n"
  },
  {
    "path": "mariadb-cluster/templates/bin/_liveness.sh.tpl",
    "content": "#!/usr/bin/env bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -e\n\nMYSQL=\"mariadb \\\n  --defaults-file=/etc/mysql/admin_user.cnf \\\n  --host=localhost \\\n{{- if .Values.manifests.certificates }}\n  --ssl-verify-server-cert=false \\\n  --ssl-ca=/etc/mysql/certs/ca.crt \\\n  --ssl-key=/etc/mysql/certs/tls.key \\\n  --ssl-cert=/etc/mysql/certs/tls.crt \\\n{{- end }}\n  --connect-timeout 2\"\n\nmysql_status_query () {\n  STATUS=$1\n  $MYSQL -e \"show status like \\\"${STATUS}\\\"\" | \\\n    awk \"/${STATUS}/ { print \\$NF; exit }\"\n}\n\n{{- if eq (int .Values.pod.replicas.server) 1 }}\nif ! 
$MYSQL -e 'select 1' > /dev/null 2>&1 ; then\n  exit 1\nfi\n\n{{- else }}\n# if [ -f /var/lib/mysql/sst_in_progress ]; then\n#   # SST in progress, with this node receiving a snapshot.\n#   # MariaDB won't be up yet; avoid killing.\n#   exit 0\n# fi\n\nif [ \"x$(mysql_status_query wsrep_ready)\" != \"xON\" ]; then\n  # WSREP says the node can receive queries\n  exit 1\nfi\n\nif [ \"x$(mysql_status_query wsrep_connected)\" != \"xON\" ]; then\n  # WSREP connected\n  exit 1\nfi\n\nif [ \"x$(mysql_status_query wsrep_cluster_status)\" != \"xPrimary\" ]; then\n  # Not in primary cluster\n  exit 1\nfi\n\nwsrep_local_state_comment=$(mysql_status_query wsrep_local_state_comment)\nif [ \"x${wsrep_local_state_comment}\" != \"xSynced\" ] && [ \"x${wsrep_local_state_comment}\" != \"xDonor/Desynced\" ]; then\n  # WSREP not synced or not sending SST\n  exit 1\nfi\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/bin/_readiness.sh.tpl",
    "content": "#!/usr/bin/env bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -e\n\nMYSQL=\"mariadb \\\n  --defaults-file=/etc/mysql/admin_user.cnf \\\n  --host=localhost \\\n{{- if .Values.manifests.certificates }}\n  --ssl-verify-server-cert=false \\\n  --ssl-ca=/etc/mysql/certs/ca.crt \\\n  --ssl-key=/etc/mysql/certs/tls.key \\\n  --ssl-cert=/etc/mysql/certs/tls.crt \\\n{{- end }}\n  --connect-timeout 2\"\n\nmysql_status_query () {\n  STATUS=$1\n  $MYSQL -e \"show status like \\\"${STATUS}\\\"\" | \\\n    awk \"/${STATUS}/ { print \\$NF; exit }\"\n}\n\nif ! $MYSQL -e 'select 1' > /dev/null 2>&1 ; then\n  exit 1\nfi\n\n{{- if gt (int .Values.pod.replicas.server) 1 }}\nif [ \"x$(mysql_status_query wsrep_ready)\" != \"xON\" ]; then\n  # WSREP says the node can receive queries\n  exit 1\nfi\n\nif [ \"x$(mysql_status_query wsrep_connected)\" != \"xON\" ]; then\n  # WSREP connected\n  exit 1\nfi\n\nif [ \"x$(mysql_status_query wsrep_cluster_status)\" != \"xPrimary\" ]; then\n  # Not in primary cluster\n  exit 1\nfi\n\nif [ \"x$(mysql_status_query wsrep_local_state_comment)\" != \"xSynced\" ]; then\n  # WSREP not synced\n  exit 1\nfi\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/bin/_test.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nrm -f /tmp/test-success\n\nmysqlslap \\\n  --defaults-file=/etc/mysql/test-params.cnf \\\n  {{ include \"helm-toolkit.utils.joinListWithSpace\" $.Values.conf.tests.params }} -vv \\\n  --post-system=\"touch /tmp/test-success\"\n\nif ! [ -f /tmp/test-success ]; then\n  exit 1\nfi\n"
  },
  {
    "path": "mariadb-cluster/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.certificates -}}\n{{ dict \"envAll\" . \"service\" \"oslo_db\" \"type\" \"default\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "mariadb-cluster/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{ if eq .Values.endpoints.oslo_db.auth.admin.username .Values.endpoints.oslo_db.auth.sst.username }}\n{{ fail \"the DB admin username should not match the sst user username\" }}\n{{ end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: mariadb-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" ( index $envAll.Values.conf.database \"init_script\" ) \"key\" \"init.sh\" ) | indent 2 }}\n  readiness.sh: |\n{{ tuple \"bin/_readiness.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  liveness.sh: |\n{{ tuple \"bin/_liveness.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  test.sh: |\n{{ tuple \"bin/_test.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- if .Values.manifests.job_ks_user }}\n  ks-user.sh: |\n{{ include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\" );\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: mariadb-etc\ndata:\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" ( index $envAll.Values.conf.database \"my\" ) \"key\" \"my.cnf\" ) | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $serviceName := tuple \"oslo_db\" \"server\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" $serviceName -}}\n{{- if .Values.pod.tolerations.mariadb.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/job-refresh-statefulset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.mariadb }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"mariadb-cluster-refresh-statefulset\" }}\n{{ tuple $envAll \"mariadb_cluster_refresh_statefulset\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\n  namespace: {{ $envAll.Release.Namespace }}\nrules:\n  - apiGroups:\n      - \"\"\n      - extensions\n      - batch\n      - apps\n    resources:\n      - statefulsets\n    verbs:\n      - get\n      - list\n      - delete\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\n  namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: mariadb-cluster-refresh-statefulset\n  labels:\n{{ tuple $envAll \"mariadb-cluster\" \"refresh-statefulset\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": \"post-upgrade\"\n    \"helm.sh/hook-weight\": \"5\"\n    \"helm.sh/hook-delete-policy\": \"before-hook-creation\"\nspec:\n  backoffLimit: {{ 
.Values.jobs.mariadb_cluster_refresh_statefulset.backoffLimit }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"mariadb-cluster\" \"refresh-statefulset\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"\" \"containerNames\" (list \"init\" \"exporter-create-sql-user\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      shareProcessNamespace: true\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"job\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      activeDeadlineSeconds: {{ .Values.jobs.mariadb_cluster_refresh_statefulset.activeDeadlineSeconds }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"mariadb_cluster_refresh_statefulset\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: refresh-statefulset\n{{ tuple $envAll \"mariadb_cluster_refresh_statefulset\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"mariadb_cluster_refresh_statefulset\" \"container\" \"main\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.mariadb_cluster_refresh_statefulset | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command: [\"/bin/sh\", \"-c\"]\n          args: [\"kubectl delete statefulset ${STATEFULSET_NAME} --namespace=${NAMESPACE}\"]\n          env:\n            - name: STATEFULSET_NAME\n              value: {{ tuple \"oslo_db\" \"server\" . 
| include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/mariadb.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"mariadbReadinessProbe\" }}\nexec:\n  command:\n    - /tmp/readiness.sh\n{{- end }}\n{{- define \"mariadbLivenessProbe\" }}\nexec:\n  command:\n    - /tmp/liveness.sh\n{{- end }}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.mariadb }}\n{{- $envAll := . }}\n\n---\napiVersion: mariadb.mmontes.io/v1alpha1\nkind: MariaDB\nmetadata:\n  # NOTE(portdirect): the statefulset name must match the POD_NAME_PREFIX env var for discovery to work\n  name: {{ tuple \"oslo_db\" \"server\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n    mariadb-dbadmin-password-hash: {{ tuple \"secret-dbadmin-password.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n  labels:\n{{ tuple $envAll \"mariadb\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  rootPasswordSecretKeyRef:\n    name: mariadb-dbadmin-password\n    key: MYSQL_DBADMIN_PASSWORD\n\n{{ tuple $envAll \"mariadb\" | include \"helm-toolkit.snippets.image\" | indent 2 }}\n\n  initContainers:\n    - command:\n      - /tmp/init.sh\n{{ tuple $envAll \"mariadb\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n\n{{ if $envAll.Values.conf.galera.enabled }}\n  galera:\n    enabled: true\n    primary:\n      podIndex: {{ .Values.conf.galera.primary.podIndex }}\n      automaticFailover: {{ .Values.conf.galera.primary.automaticFailover }}\n    sst: {{ .Values.conf.galera.sst }}\n    replicaThreads: {{ .Values.conf.galera.replicaThreads }}\n    agent:\n{{ tuple $envAll \"agent\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{- dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"agent\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      args:\n        - '--graceful-shutdown-timeout=5s'\n        - '--recovery-timeout=5m0s'\n        - '-log-dev'\n        - '-log-level=debug'\n      port: {{ .Values.conf.galera.agent.port }}\n      {{- if $envAll.Values.conf.galera.agent.kubernetesAuth.enabled }}\n      kubernetesAuth:\n        enabled: true\n      {{- end }}\n      gracefulShutdownTimeout: {{ .Values.conf.galera.agent.gracefulShutdownTimeout }}\n    {{- if $envAll.Values.conf.galera.recovery.enabled }}\n    recovery:\n      enabled: true\n      clusterHealthyTimeout: {{ .Values.conf.galera.recovery.clusterHealthyTimeout }}\n 
     clusterBootstrapTimeout: {{ .Values.conf.galera.recovery.clusterBootstrapTimeout }}\n      podRecoveryTimeout: {{ .Values.conf.galera.recovery.podRecoveryTimeout }}\n      podSyncTimeout: {{ .Values.conf.galera.recovery.podSyncTimeout }}\n    {{- end }}\n    initContainer:\n{{ tuple $envAll \"initContainer\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{- dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      args:\n        - '-log-dev'\n        - '-log-level=debug'\n    # galera volume templates\n    volumeClaimTemplate:\n      resources:\n        requests:\n          storage: {{ .Values.volume.galera.size }}\n      accessModes:\n        - ReadWriteOnce\n      storageClassName: {{ .Values.volume.galera.class_name }}\n{{ end }}\n\n{{ include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" ( index $envAll.Values.conf.database \"galera\" ) \"key\" \"myCnf\" ) | indent 2 }}\n\n  replicas: {{ .Values.pod.replicas.server }}\n\n  affinity:\n{{- tuple $envAll \"mariadb\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 4 }}\n\n{{ if $envAll.Values.pod.tolerations.mariadb.enabled }}\n{{- tuple $envAll \"mariadb\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{- end }}\n\n  updateStrategy:\n    type: {{ .Values.pod.lifecycle.upgrades.deployments.pod_replacement_strategy }}\n\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 2 }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"mariadb\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 2 }}\n\n  nodeSelector:\n    {{ .Values.labels.server.node_selector_key }}: {{ .Values.labels.server.node_selector_value }}\n\n  podAnnotations:\n{{- dict \"envAll\" $envAll \"podName\" 
\"mariadb-server\" \"containerNames\" (list \"init-0\" \"init\" \"agent\" \"mariadb\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\n\n  podDisruptionBudget:\n    minAvailable: {{ .Values.pod.lifecycle.disruption_budget.mariadb.min_available }}\n\n{{ dict \"envAll\" . \"component\" \"server\" \"container\" \"mariadb\" \"type\" \"readiness\" \"probeTemplate\" (include \"mariadbReadinessProbe\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 2 }}\n\n{{ dict \"envAll\" . \"component\" \"server\" \"container\" \"mariadb\" \"type\" \"liveness\" \"probeTemplate\" (include \"mariadbLivenessProbe\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 2 }}\n\n{{ if  .Values.monitoring.prometheus.enabled }}\n  metrics:\n    exporter:\n{{ tuple $envAll \"prometheus_mysql_exporter\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"prometheus_mysql_exporter\" \"container\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.prometheus_mysql_exporter | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      port: {{ tuple \"prometheus_mysql_exporter\" \"internal\" \"metrics\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- if $envAll.Values.manifests.certificates }}\n      volumeMounts:\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 8 }}\n{{- end }}\n    serviceMonitor:\n      prometheusRelease: prometheus-mysql-exporter\n      interval: 10s\n      scrapeTimeout: 10s\n{{ end }}\n\n  env:\n    - name: MYSQL_HISTFILE\n      value: {{ .Values.conf.database.mysql_histfile }}\n{{ if  .Values.conf.database.auto_upgrade.enabled }}\n    - name: MARIADB_AUTO_UPGRADE\n      value: {{ .Values.conf.database.auto_upgrade.enabled | quote }}\n    - name: MARIADB_DISABLE_UPGRADE_BACKUP\n      value: {{ .Values.conf.database.auto_upgrade.disable_upgrade_backup | quote }}\n{{ end }}\n\n  volumeMounts:\n    - name: pod-tmp\n      mountPath: /tmp\n    - name: mariadb-secrets\n      mountPath: /etc/mysql/admin_user.cnf\n      subPath: admin_user.cnf\n      readOnly: true\n    - name: mariadb-secrets\n      mountPath: /docker-entrypoint-initdb.d/privileges.sql\n      subPath: privileges.sql\n      readOnly: true\n    - name: mariadb-bin\n      mountPath: /tmp/init.sh\n      subPath: init.sh\n    - name: mariadb-bin\n      mountPath: /tmp/readiness.sh\n      subPath: readiness.sh\n      readOnly: true\n    - name: mariadb-bin\n      mountPath: /tmp/liveness.sh\n      subPath: liveness.sh\n      readOnly: true\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 4 }}\n\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: mariadb-bin\n      configMap:\n        name: mariadb-bin\n        defaultMode: 0555\n    - name: mariadb-etc\n      configMap:\n        name: mariadb-etc\n        defaultMode: 0444\n    - name: 
mariadb-secrets\n      secret:\n        secretName: mariadb-secrets\n        defaultMode: 0444\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n\n  # storage volume templates\n  volumeClaimTemplate:\n    resources:\n      requests:\n        storage: {{ .Values.volume.size }}\n    accessModes:\n      - ReadWriteOnce\n    {{- if ne .Values.volume.class_name \"default\" }}\n    storageClassName: {{ .Values.volume.class_name }}\n    {{- end }}\n\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"mariadb\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "mariadb-cluster/templates/pod-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.pod_test }}\n{{- $envAll := . }}\n{{- $dependencies := .Values.dependencies.static.tests }}\n\n{{- $serviceAccountName := print .deployment_name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{.deployment_name}}-test\"\n  labels:\n{{ tuple $envAll \"mariadb\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"mariadb-test\" \"containerNames\" (list \"init\" \"mariadb-test\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n  shareProcessNamespace: true\n  serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"tests\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n{{ if $envAll.Values.pod.tolerations.mariadb.enabled }}\n{{ tuple $envAll \"mariadb\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  
nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  restartPolicy: Never\n  initContainers:\n{{ tuple $envAll \"tests\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: mariadb-test\n{{ dict \"envAll\" $envAll \"application\" \"tests\" \"container\" \"test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n{{ tuple $envAll \"scripted_test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n      command:\n        - /tmp/test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: mariadb-bin\n          mountPath: /tmp/test.sh\n          subPath: test.sh\n          readOnly: true\n        - name: mariadb-secrets\n          mountPath: /etc/mysql/test-params.cnf\n          {{ if eq $envAll.Values.conf.tests.endpoint \"internal\" }}\n          subPath: admin_user_internal.cnf\n          {{ else if eq $envAll.Values.conf.tests.endpoint \"direct\" }}\n          subPath: admin_user.cnf\n          {{ else }}\n          {{ fail \"Either 'direct' or 'internal' should be specified for .Values.conf.tests.endpoint\" }}\n          {{ end }}\n          readOnly: true\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 8 }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: mariadb-bin\n      configMap:\n        name: mariadb-bin\n        defaultMode: 0555\n    - name: mariadb-secrets\n      secret:\n        secretName: mariadb-secrets\n        defaultMode: 0444\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/secret-dbadmin-password.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_dbadmin_password }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: mariadb-dbadmin-password\ntype: Opaque\ndata:\n  MYSQL_DBADMIN_PASSWORD: {{ .Values.endpoints.oslo_db.auth.admin.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/secret-dbaudit-password.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_dbaudit_password }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: mariadb-dbaudit-password\ntype: Opaque\ndata:\n  MYSQL_DBAUDIT_PASSWORD: {{ .Values.endpoints.oslo_db.auth.audit.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/secret-sst-password.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_sst_password }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: mariadb-dbsst-password\ntype: Opaque\ndata:\n  MYSQL_DBSST_PASSWORD: {{ .Values.endpoints.oslo_db.auth.sst.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/secrets/_admin_user.cnf.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n[client]\nuser = {{ .Values.endpoints.oslo_db.auth.admin.username }}\npassword = {{ .Values.endpoints.oslo_db.auth.admin.password }}\nhost = {{ tuple \"oslo_db\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nport = {{ tuple \"oslo_db\" \"direct\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- if .Values.manifests.certificates }}\nssl-ca = /etc/mysql/certs/ca.crt\nssl-key = /etc/mysql/certs/tls.key\nssl-cert = /etc/mysql/certs/tls.crt\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/secrets/_admin_user_internal.cnf.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n[client]\nuser = {{ .Values.endpoints.oslo_db.auth.admin.username }}\npassword = {{ .Values.endpoints.oslo_db.auth.admin.password }}\nhost = {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\nport = {{ tuple \"oslo_db\" \"internal\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- if .Values.manifests.certificates }}\nssl-ca = /etc/mysql/certs/ca.crt\nssl-key = /etc/mysql/certs/tls.key\nssl-cert = /etc/mysql/certs/tls.crt\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/templates/secrets/_privileges.sql.tpl",
    "content": "###########################################\n# The lines not confirmed to be working with operator are disabled\n###########################################\n# DELETE FROM mysql.user WHERE user != 'mariadb.sys';\n# CREATE OR REPLACE USER '{{ .Values.endpoints.oslo_db.auth.admin.username }}'@'%' IDENTIFIED BY '{{ .Values.endpoints.oslo_db.auth.admin.password }}';\n{{- if .Values.manifests.certificates }}\nGRANT ALL ON *.* TO '{{ .Values.endpoints.oslo_db.auth.admin.username }}'@'%' REQUIRE X509 WITH GRANT OPTION;\n{{- else }}\nGRANT ALL ON *.* TO '{{ .Values.endpoints.oslo_db.auth.admin.username }}'@'%' WITH GRANT OPTION;\n{{- end }}\nDROP DATABASE IF EXISTS test ;\n# CREATE OR REPLACE USER '{{ .Values.endpoints.oslo_db.auth.sst.username }}'@'127.0.0.1' IDENTIFIED BY '{{ .Values.endpoints.oslo_db.auth.sst.password }}';\n# GRANT PROCESS, RELOAD, LOCK TABLES, REPLICATION CLIENT ON *.* TO '{{ .Values.endpoints.oslo_db.auth.sst.username }}'@'127.0.0.1' ;\nCREATE OR REPLACE USER '{{ .Values.endpoints.oslo_db.auth.audit.username }}'@'%' IDENTIFIED BY '{{ .Values.endpoints.oslo_db.auth.audit.password }}';\n{{- if .Values.manifests.certificates }}\nGRANT SELECT ON *.* TO '{{ .Values.endpoints.oslo_db.auth.audit.username }}'@'%' REQUIRE X509;\n{{- else }}\nGRANT SELECT ON *.* TO '{{ .Values.endpoints.oslo_db.auth.audit.username }}'@'%' ;\n{{- end }}\nFLUSH PRIVILEGES ;\n"
  },
  {
    "path": "mariadb-cluster/templates/secrets-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: mariadb-secrets\ntype: Opaque\ndata:\n  admin_user.cnf: {{ tuple \"secrets/_admin_user.cnf.tpl\" . | include \"helm-toolkit.utils.template\"  | b64enc }}\n  admin_user_internal.cnf: {{ tuple \"secrets/_admin_user_internal.cnf.tpl\" . | include \"helm-toolkit.utils.template\"  | b64enc }}\n  privileges.sql: {{ tuple \"secrets/_privileges.sql.tpl\" . | include \"helm-toolkit.utils.template\"  | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "mariadb-cluster/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for mariadb.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nimages:\n  tags:\n    agent: ghcr.io/mariadb-operator/agent:v0.0.3\n    initContainer: ghcr.io/mariadb-operator/init:v0.0.6\n    mariadb: quay.io/airshipit/mariadb:latest-ubuntu_noble\n    prometheus_mysql_exporter: docker.io/prom/mysqld-exporter:v0.12.1\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n    scripted_test: quay.io/airshipit/mariadb:latest-ubuntu_noble\n    mariadb_cluster_refresh_statefulset: quay.io/airshipit/porthole-mysqlclient-utility:latest-ubuntu_noble\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  server:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\npod:\n  probes:\n    server:\n      mariadb:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 
30\n            timeoutSeconds: 15\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 120\n            periodSeconds: 30\n            timeoutSeconds: 15\n  security_context:\n    server:\n      pod:\n        runAsUser: 0\n      container:\n        init-0:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        init:\n          runAsUser: 0\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: false\n        agent:\n          runAsUser: 0\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: false\n        mariadb:\n          runAsUser: 0\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: false\n    mariadb_cluster_refresh_statefulset:\n      pod:\n        runAsUser: 0\n      container:\n        main:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    tests:\n      pod:\n        runAsUser: 999\n      container:\n        test:\n          runAsUser: 999\n          readOnlyRootFilesystem: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    mariadb:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  replicas:\n    server: 3\n    prometheus_mysql_exporter: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    termination_grace_period:\n      prometheus_mysql_exporter:\n        timeout: 30\n    disruption_budget:\n      mariadb:\n        min_available: 0\n  resources:\n    
enabled: false\n    server:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      tests:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n      mariadb_cluster_refresh_statefulset:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - mariadb-server-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    tests:\n      services:\n        - endpoint: internal\n          service: oslo_db\n\nvolume:\n  enabled: true\n  class_name: general\n  size: 5Gi\n  backup:\n    enabled: true\n    class_name: general\n    size: 5Gi\n  galera:\n    enabled: true\n    class_name: general\n    size: 300Mi\n\njobs:\n  mariadb_cluster_refresh_statefulset:\n    backoffLimit: 87600\n    activeDeadlineSeconds: 3600\n\nconf:\n  galera:\n    enabled: true\n    primary:\n      podIndex: 0\n      automaticFailover: true\n    sst: mariabackup\n    replicaThreads: 1\n    agent:\n      port: 5555\n      kubernetesAuth:\n        enabled: true\n      gracefulShutdownTimeout: 5s\n    recovery:\n      enabled: true\n      clusterHealthyTimeout: 3m\n      clusterBootstrapTimeout: 10m\n      podRecoveryTimeout: 5m\n      podSyncTimeout: 5m\n  tests:\n    # This may either be:\n    # * internal: which will hit the endpoint exposed by the ingress controller\n    # * direct: which will 
hit the backends directly via a k8s service ip\n    # Note, deadlocks and failure are to be expected with concurrency if\n    # hitting the `direct` endpoint.\n    endpoint: internal\n    # This is a list of tuning params passed to mysqlslap:\n    params:\n      - --auto-generate-sql\n      - --concurrency=100\n      - --number-of-queries=1000\n      - --number-char-cols=1\n      - --number-int-cols=1\n  mariadb_server:\n    setup_wait:\n      iteration: 30\n      duration: 5\n  database:\n    auto_upgrade:\n      enabled: true\n      disable_upgrade_backup: false\n    mysql_histfile: \"/dev/null\"\n    init_script: |\n      #!/usr/bin/env bash\n\n      {{/*\n      Licensed under the Apache License, Version 2.0 (the \"License\");\n      you may not use this file except in compliance with the License.\n      You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n      Unless required by applicable law or agreed to in writing, software\n      distributed under the License is distributed on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n      See the License for the specific language governing permissions and\n      limitations under the License.\n      */}}\n\n      set -x\n\n      chown -R \"mysql:mysql\" /var/lib/mysql;\n      chmod 771 /var/lib/mysql;\n    galera: |\n      [mariadb]\n      bind-address=0.0.0.0\n      default_storage_engine=InnoDB\n      binlog_format=row\n      innodb_autoinc_lock_mode=2\n      max_allowed_packet=256M\n      ########################\n      #\n      ########################\n      ignore-db-dirs=lost+found\n\n      # Charset\n      character_set_server=utf8\n      collation_server=utf8_general_ci\n      skip-character-set-client-handshake\n\n      # Logging\n      slow_query_log=off\n      slow_query_log_file=/var/log/mysql/mariadb-slow.log\n      log_warnings=2\n\n      # General logging has huge performance penalty therefore is 
disabled by default\n      general_log=off\n      general_log_file=/var/log/mysql/mariadb-error.log\n\n      long_query_time=3\n      log_queries_not_using_indexes=on\n\n      # Networking\n      bind_address=0.0.0.0\n      port={{ tuple \"oslo_db\" \"direct\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n\n      # When a client connects, the server will perform hostname resolution,\n      # and when DNS is slow, establishing the connection will become slow as well.\n      # It is therefore recommended to start the server with skip-name-resolve to\n      # disable all DNS lookups. The only limitation is that the GRANT statements\n      # must then use IP addresses only.\n      skip_name_resolve\n\n      # Tuning\n      user=mysql\n      max_allowed_packet=256M\n      open_files_limit=10240\n      max_connections=8192\n      max-connect-errors=1000000\n\n      # General security settings\n      # Reference: https://dev.mysql.com/doc/mysql-security-excerpt/8.0/en/general-security-issues.html\n      # secure_file_priv is set to '/home' because it is read-only, which will\n      # disable this feature completely.\n      secure_file_priv=/home\n      local_infile=0\n      symbolic_links=0\n      sql_mode=\"STRICT_ALL_TABLES,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION\"\n\n\n      ## Generally, it is unwise to set the query cache to be larger than 64-128M\n      ## as the costs associated with maintaining the cache outweigh the performance\n      ## gains.\n      ## The query cache is a well known bottleneck that can be seen even when\n      ## concurrency is moderate. 
The best option is to disable it from day 1\n      ## by setting query_cache_size=0 (now the default on MySQL 5.6)\n      ## and to use other ways to speed up read queries: good indexing, adding\n      ## replicas to spread the read load or using an external cache.\n      query_cache_size=0\n      query_cache_type=0\n\n      sync_binlog=0\n      thread_cache_size=16\n      table_open_cache=2048\n      table_definition_cache=1024\n\n      #\n      # InnoDB\n      #\n      # The buffer pool is where data and indexes are cached: having it as large as possible\n      # will ensure you use memory and not disks for most read operations.\n      # Typical values are 50..75% of available RAM.\n      # TODO(tomasz.paszkowski): This needs to by dynamic based on available RAM.\n      innodb_buffer_pool_size=1024M\n      innodb_doublewrite=0\n      innodb_file_per_table=1\n      innodb_flush_method=O_DIRECT\n      innodb_io_capacity=500\n      innodb_log_file_size=128M\n      innodb_old_blocks_time=1000\n      innodb_read_io_threads=8\n      innodb_write_io_threads=8\n\n      {{ if .Values.manifests.certificates }}\n      # TLS\n      ssl_ca=/etc/mysql/certs/ca.crt\n      ssl_key=/etc/mysql/certs/tls.key\n      ssl_cert=/etc/mysql/certs/tls.crt\n      # tls_version = TLSv1.2,TLSv1.3\n      {{ end }}\n\n\n      [mysqldump]\n      max-allowed-packet=16M\n\n      [client]\n      default_character_set=utf8\n      {{ if .Values.manifests.certificates }}\n      # TLS\n      ssl_ca=/etc/mysql/certs/ca.crt\n      ssl_key=/etc/mysql/certs/tls.key\n      ssl_cert=/etc/mysql/certs/tls.crt\n      # tls_version = TLSv1.2,TLSv1.3\n      {{ end }}\n\n    my: |\n      [mysqld]\n      datadir=/var/lib/mysql\n      basedir=/usr\n      ignore-db-dirs=lost+found\n\n      [client-server]\n      !includedir /etc/mysql/conf.d/\n\n    config_override: null\n    # Any configuration here will override the base config.\n    # config_override: |-\n    #   [mysqld]\n    #   
wsrep_slave_threads=1\n\nmonitoring:\n  prometheus:\n    enabled: false\n    mysqld_exporter:\n      scrape: true\n\nsecrets:\n  identity:\n    admin: keystone-admin-user\n  oci_image_registry:\n    mariadb: mariadb-oci-image-registry-key\n  tls:\n    oslo_db:\n      server:\n        public: mariadb-tls-server\n        internal: mariadb-tls-direct\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      mariadb:\n        username: mariadb\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  monitoring:\n    name: prometheus\n    namespace: null\n    hosts:\n      default: prom-metrics\n      public: prometheus\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 9090\n        public: 80\n  prometheus_mysql_exporter:\n    namespace: null\n    hosts:\n      default: mysql-exporter\n    host_fqdn_override:\n      default: null\n    path:\n      default: /metrics\n    scheme:\n      default: 'http'\n    port:\n      metrics:\n        default: 9104\n  oslo_db:\n    namespace: null\n    auth:\n      admin:\n        username: root\n        password: password\n      sst:\n        username: sst\n        password: password\n      audit:\n        username: audit\n        password: password\n      exporter:\n        username: exporter\n        password: password\n    
hosts:\n      default: mariadb-server-primary\n      direct: mariadb-server-internal\n      discovery: mariadb-discovery\n      server: mariadb-server\n    host_fqdn_override:\n      default: null\n    path: null\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n      wsrep:\n        default: 4567\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns_tcp:\n        default: 53\n      dns:\n        default: 53\n        protocol: UDP\n  identity:\n    name: backup-storage-auth\n    namespace: openstack\n    auth:\n      admin:\n        # Auth URL of null indicates local authentication\n        # HTK will form the URL unless specified here\n        auth_url: null\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      mariadb-server:\n        # Auth URL of null indicates local authentication\n        # HTK will form the URL unless specified here\n        auth_url: null\n        role: admin\n        region_name: RegionOne\n        username: mariadb-backup-user\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 80\n        internal: 5000\n\nnetwork_policy:\n  mariadb:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  job_image_repo_sync: true\n  network_policy: false\n  pod_test: true\n  secret_dbadmin_password: true\n  secret_sst_password: true\n  
secret_dbaudit_password: true\n  secret_etc: true\n  secret_registry: true\n  service_primary: true\n  mariadb: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "masakari/.helmignore",
    "content": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation (prefixed with !). Only one pattern per line.\n.DS_Store\n# Common VCS dirs\n.git/\n.gitignore\n.bzr/\n.bzrignore\n.hg/\n.hgignore\n.svn/\n# Common backup files\n*.swp\n*.bak\n*.tmp\n*.orig\n*~\n# Various IDEs\n.project\n.idea/\n*.tmproj\n.vscode/\n"
  },
  {
    "path": "masakari/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Masakari\nname: masakari\nversion: 2025.2.0\nhome: https://docs.openstack.org/developer/masakari\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Masakari/OpenStack_Project_masakari_vertical.png\nsources:\n  - https://opendev.org/openstack/masakari\n  - https://opendev.org/openstack/masakari-monitors\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "masakari/templates/bin/_manage-db.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nmasakari-manage db sync"
  },
  {
    "path": "masakari/templates/bin/_masakari-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n   exec masakari-api --config-file /etc/masakari/masakari.conf\n}\n\nfunction stop () {\n   kill -TERM 1\n}\n\n$COMMAND"
  },
  {
    "path": "masakari/templates/bin/_masakari-engine.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n   exec masakari-engine --config-file /etc/masakari/masakari.conf\n}\n\nfunction stop () {\n   kill -TERM 1\n}\n\n$COMMAND"
  },
  {
    "path": "masakari/templates/bin/_masakari-host-monitor.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n   exec masakari-hostmonitor --config-file /etc/masakari/masakarimonitors.conf \\\n        --config-file /tmp/pod-shared/masakarimonitors.conf\n}\n\nfunction stop () {\n   kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "masakari/templates/bin/_masakari-instance-monitor.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n   exec masakari-instancemonitor --config-file /etc/masakari/masakarimonitors.conf \\\n         --config-file /tmp/pod-shared/masakarimonitors.conf\n}\n\nfunction stop () {\n   kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "masakari/templates/bin/_masakari-introspective-instance-monitor.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n   exec masakari-introspectiveinstancemonitor --config-file /etc/masakari/masakarimonitors.conf \\\n        --config-file /tmp/pod-shared/masakarimonitors.conf\n}\n\nfunction stop () {\n   kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "masakari/templates/bin/_masakari-monitors-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nnova_compute_hostname=\"$COMPUTE_NODE_NAME\"\ncat <<EOF>/tmp/pod-shared/masakarimonitors.conf\n[DEFAULT]\nhostname=$nova_compute_hostname\nEOF"
  },
  {
    "path": "masakari/templates/bin/_masakari-process-monitor.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n   exec masakari-processmonitor --config-file /etc/masakari/masakarimonitors.conf \\\n         --config-file /tmp/pod-shared/masakarimonitors.conf\n}\n\nfunction stop () {\n   kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "masakari/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $rallyTests := .Values.conf.rally_tests }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: masakari-bin\ndata:\n  masakari-engine.sh: |\n{{ tuple \"bin/_masakari-engine.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  masakari-api.sh: |\n{{ tuple \"bin/_masakari-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  manage-db.sh: |\n{{ tuple \"bin/_manage-db.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n  masakari-host-monitor.sh: |\n{{ tuple \"bin/_masakari-host-monitor.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  masakari-process-monitor.sh: |\n{{ tuple \"bin/_masakari-process-monitor.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  masakari-instance-monitor.sh: |\n{{ tuple \"bin/_masakari-instance-monitor.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  masakari-introspective-instance-monitor.sh: |\n{{ tuple \"bin/_masakari-introspective-instance-monitor.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  masakari-monitors-init.sh: |\n{{ tuple \"bin/_masakari-monitors-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"masakari.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if empty .Values.conf.masakari.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.masakari.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.masakari.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.masakari.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.masakari.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.masakari.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.masakari.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.masakari.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.masakari.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.masakari.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.masakari.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.masakari.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.masakari.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.masakari.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.masakari.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.masakari.keystone_authtoken \"username\" 
.Values.endpoints.identity.auth.masakari.username -}}\n{{- end -}}\n{{- if empty .Values.conf.masakari.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.masakari.keystone_authtoken \"password\" .Values.endpoints.identity.auth.masakari.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.masakari.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.masakari.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.masakari.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.masakari.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.masakari.database.connection)) (empty .Values.conf.masakari.database.connection) -}}\n{{- $connection := tuple \"oslo_db\" \"internal\" \"masakari\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.masakari.database \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.masakari.database \"connection\" $connection -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.masakari.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"masakari\" \"amqp\" . 
| include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.masakari.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.masakari.DEFAULT.os_privileged_user_name -}}\n{{- $_ := set .Values.conf.masakari.DEFAULT \"os_privileged_user_name\" .Values.endpoints.identity.auth.masakari.username }}\n{{- end -}}\n{{- if empty .Values.conf.masakari.DEFAULT.os_privileged_user_password -}}\n{{- $_ := set .Values.conf.masakari.DEFAULT \"os_privileged_user_password\" .Values.endpoints.identity.auth.masakari.password }}\n{{- end -}}\n{{- if empty .Values.conf.masakari.DEFAULT.os_privileged_user_auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.masakari.DEFAULT \"os_privileged_user_auth_url\" }}\n{{- end -}}\n{{- if empty .Values.conf.masakari.DEFAULT.os_privileged_user_tenant -}}\n{{- $_ := set .Values.conf.masakari.DEFAULT \"os_privileged_user_tenant\" .Values.endpoints.identity.auth.masakari.project_name }}\n{{- end -}}\n\n{{- if empty .Values.conf.masakari.DEFAULT.os_region_name -}}\n{{- $_ := set .Values.conf.masakari.DEFAULT \"os_region_name\" .Values.endpoints.identity.auth.masakari.region_name }}\n{{- end -}}\n\n{{- if empty .Values.conf.masakari.DEFAULT.os_user_domain_name -}}\n{{- $_ := set .Values.conf.masakari.DEFAULT \"os_user_domain_name\" .Values.endpoints.identity.auth.masakari.user_domain_name }}\n{{- end -}}\n\n{{- if empty .Values.conf.masakari.DEFAULT.os_project_domain_name -}}\n{{- $_ := set .Values.conf.masakari.DEFAULT \"os_project_domain_name\" .Values.endpoints.identity.auth.masakari.user_domain_name }}\n{{- end -}}\n\n{{- if empty .Values.conf.masakarimonitors.api.region -}}\n{{- $_ := set .Values.conf.masakarimonitors.api \"region\" .Values.endpoints.identity.auth.masakari.region_name -}}\n{{- end -}}\n\n{{- if empty .Values.conf.masakarimonitors.api.auth_url -}}\n{{- $_ := tuple \"identity\" 
\"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.masakarimonitors.api \"auth_url\" }}\n{{- end -}}\n\n{{- if empty .Values.conf.masakarimonitors.api.project_name -}}\n{{- $_ := set .Values.conf.masakarimonitors.api \"project_name\" .Values.endpoints.identity.auth.masakari.project_name }}\n{{- end -}}\n\n{{- if empty .Values.conf.masakarimonitors.api.project_domain_name -}}\n{{- $_ := set .Values.conf.masakarimonitors.api \"project_domain_name\" .Values.endpoints.identity.auth.masakari.project_name }}\n{{- end -}}\n\n{{- if empty .Values.conf.masakarimonitors.api.username -}}\n{{- $_ := set .Values.conf.masakarimonitors.api \"username\" .Values.endpoints.identity.auth.masakari.username }}\n{{- end -}}\n\n{{- if empty .Values.conf.masakarimonitors.api.user_domain_name -}}\n{{- $_ := set .Values.conf.masakarimonitors.api \"user_domain_name\" .Values.endpoints.identity.auth.masakari.user_domain_name }}\n{{- end -}}\n\n{{- if empty .Values.conf.masakarimonitors.api.password -}}\n{{- $_ := set .Values.conf.masakarimonitors.api \"password\" .Values.endpoints.identity.auth.masakari.password }}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.masakari.taskflow.connection)) (empty .Values.conf.masakari.taskflow.connection) -}}\n{{- $connection := tuple \"oslo_db\" \"internal\" \"masakari\" \"mysql\" . 
| include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.masakari.database \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.masakari.taskflow \"connection\" $connection -}}\n{{- end -}}\n{{- end -}}\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $configMapName }}\ntype: Opaque\ndata:\n  masakari.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.masakari | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.paste | b64enc }}\n  masakarimonitors.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.masakarimonitors | b64enc }}\n  masakari_sudoers: {{ $envAll.Values.conf.masakari_sudoers | b64enc }}\n{{- end }}\n{{- end }}\n{{- if .Values.manifests.configmap_etc }}\n{{- list \"masakari-etc\" . | include \"masakari.configmap.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/daemonset-host-monitor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.host_monitor }}\n{{- $envAll := . }}\n{{- $daemonset := \"masakari-host-monitor\" }}\n\n{{- $mounts_masakari_host_monitor := .Values.pod.mounts.masakari_host_monitor.masakari_host_monitor }}\n{{- $mounts_masakari_host_monitor_init := .Values.pod.mounts.masakari_host_monitor.init_container }}\n\n{{- $serviceAccountName := \"masakari-host-monitor\" }}\n{{- tuple $envAll \"masakari_host_monitor\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: masakari-host-monitor\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll $daemonset | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{- dict \"envAll\" $envAll \"podName\" \"masakari-host-monitor\" \"containerNames\" (list \"masakari-monitor\") | include 
\"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"masakari_host_monitor\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"masakari-host-monitor\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.monitors.node_selector_key }}: {{ .Values.labels.monitors.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"masakari_host_monitor\" $mounts_masakari_host_monitor_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: hostmonitor-init\n{{ tuple $envAll \"masakari_host_monitor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.masakari_host_monitor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"masakari\" \"container\" \"masakari_host_monitor\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/masakari-monitors-init.sh\n          env:\n            - name: COMPUTE_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: masakari-bin\n              mountPath: /tmp/masakari-monitors-init.sh\n              subPath: 
masakari-monitors-init.sh\n              readOnly: true\n      hostNetwork: true\n      containers:\n        - name: masakari-host-monitor\n{{ tuple $envAll \"masakari_host_monitor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.masakari_host_monitor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"masakari\" \"container\" \"masakari_host_monitor\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/masakari-host-monitor.sh\n            - start\n          env:\n            - name: COMPUTE_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/masakari-host-monitor.sh\n                  - stop\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.masakari.oslo_concurrency.lock_path }}\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: masakari-bin\n              mountPath: /tmp/masakari-host-monitor.sh\n              subPath: masakari-host-monitor.sh\n            - name: masakari-etc\n              mountPath: /etc/masakari/masakarimonitors.conf\n              subPath: masakarimonitors.conf\n            - name: masakari-etc\n              mountPath: /etc/sudoers.d/masakari_sudoers\n              subPath: masakari_sudoers\n            - name: masakarietc\n              mountPath: /etc/masakari\n            - name: varrun\n              mountPath: /var/run\n            - name: run\n              mountPath: /run\n            - name: shm\n              mountPath: /dev/shm\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: 
oslo-lock-path\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n        - name: masakarietc\n          emptyDir: {}\n        - name: shm\n          hostPath:\n            path: /dev/shm\n        - name: varrun\n          hostPath:\n            path: /var/run\n        - name: run\n          hostPath:\n            path: /run\n        - name: masakari-bin\n          configMap:\n            name: masakari-bin\n            defaultMode: 0555\n        - name: masakari-etc\n          secret:\n            secretName: masakari-etc\n            defaultMode: 0444\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/daemonset-instance-monitor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.instance_monitor }}\n{{- $envAll := . }}\n{{- $daemonset := \"masakari-instance-monitor\" }}\n\n{{- $mounts_masakari_instance_monitor := .Values.pod.mounts.masakari_instance_monitor.masakari_instance_monitor }}\n{{- $mounts_masakari_instance_monitor_init := .Values.pod.mounts.masakari_instance_monitor.init_container }}\n\n{{- $serviceAccountName := \"masakari-instance-monitor\" }}\n{{- tuple $envAll \"masakari_instance_monitor\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: masakari-instance-monitor\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll $daemonset | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{- dict \"envAll\" $envAll \"podName\" \"masakari-instance-monitor\" \"containerNames\" (list \"masakari-monitor\") | 
include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"masakari_instance_monitor\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"masakari-instance-monitor\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.monitors.node_selector_key }}: {{ .Values.labels.monitors.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"masakari_instance_monitor\" $mounts_masakari_instance_monitor_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: instancemonitor-init\n{{ tuple $envAll \"masakari_instance_monitor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.masakari_instance_monitor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"masakari\" \"container\" \"masakari_instance_monitor\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/masakari-monitors-init.sh\n          env:\n            - name: COMPUTE_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: masakari-bin\n              mountPath: /tmp/masakari-monitors-init.sh\n        
      subPath: masakari-monitors-init.sh\n              readOnly: true\n      containers:\n        - name: masakari-instance-monitor\n{{ tuple $envAll \"masakari_instance_monitor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.masakari_instance_monitor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"masakari\" \"container\" \"masakari_instance_monitor\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/masakari-instance-monitor.sh\n            - start\n          env:\n            - name: COMPUTE_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/masakari-instance-monitor.sh\n                  - stop\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.masakari.oslo_concurrency.lock_path }}\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: masakari-bin\n              mountPath: /tmp/masakari-instance-monitor.sh\n              subPath: masakari-instance-monitor.sh\n            - name: masakari-etc\n              mountPath: /etc/masakari/masakarimonitors.conf\n              subPath: masakarimonitors.conf\n            - name: masakarietc\n              mountPath: /etc/masakari\n            - name: varrun\n              mountPath: /var/run\n            - name: run\n              mountPath: /run\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n        - name: masakarietc\n          emptyDir: {}\n        - name: varrun\n     
     hostPath:\n            path: /var/run\n        - name: run\n          hostPath:\n            path: /run\n        - name: masakari-bin\n          configMap:\n            name: masakari-bin\n            defaultMode: 0555\n        - name: masakari-etc\n          secret:\n            secretName: masakari-etc\n            defaultMode: 0444\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/daemonset-introspective-instance-monitor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.introspective_instance_monitor }}\n{{- $envAll := . }}\n{{- $daemonset := \"masakari-introspective-instance-monitor\" }}\n\n{{- $mounts_masakari_introspective_instance_monitor := .Values.pod.mounts.masakari_introspective_instance_monitor.masakari_introspective_instance_monitor }}\n{{- $mounts_masakari_introspective_instance_monitor_init := .Values.pod.mounts.masakari_introspective_instance_monitor.init_container }}\n\n{{- $serviceAccountName := \"masakari-introspective-instance-monitor\" }}\n{{- tuple $envAll \"masakari_introspective_instance_monitor\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: masakari-introspective-instance-monitor\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll $daemonset | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n     
 annotations:\n{{- dict \"envAll\" $envAll \"podName\" \"masakari-introspective-instance-monitor\" \"containerNames\" (list \"masakari-monitor\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"masakari_introspective_instance_monitor\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"masakari-introspective-instance-monitor\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.monitors.node_selector_key }}: {{ .Values.labels.monitors.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"masakari_introspective_instance_monitor\" $mounts_masakari_introspective_instance_monitor_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: introspective-instance-monitor-init\n{{ tuple $envAll \"masakari_introspective_instance_monitor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.masakari_introspective_instance_monitor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"masakari\" \"container\" \"masakari_introspective_instance_monitor\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/masakari-monitors-init.sh\n          env:\n            - name: COMPUTE_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: 
spec.nodeName\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: masakari-bin\n              mountPath: /tmp/masakari-monitors-init.sh\n              subPath: masakari-monitors-init.sh\n              readOnly: true\n      hostNetwork: true\n      containers:\n        - name: masakari-introspective-instance-monitor\n{{ tuple $envAll \"masakari_introspective_instance_monitor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.masakari_introspective_instance_monitor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"masakari\" \"container\" \"masakari_introspective_instance_monitor\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: COMPUTE_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n          command:\n            - /tmp/masakari-introspective-instance-monitor.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/masakari-introspective-instance-monitor.sh\n                  - stop\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.masakari.oslo_concurrency.lock_path }}\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: masakari-bin\n              mountPath: /tmp/masakari-introspective-instance-monitor.sh\n              subPath: masakari-introspective-instance-monitor.sh\n            - name: masakari-etc\n              mountPath: /etc/masakari/masakarimonitors.conf\n              subPath: masakarimonitors.conf\n            - name: masakari-etc\n   
           mountPath: /etc/sudoers.d/masakari_sudoers\n              subPath: masakari_sudoers\n            - name: masakarietc\n              mountPath: /etc/masakari\n            - name: varrun\n              mountPath: /var/run\n            - name: run\n              mountPath: /run\n            - name: shm\n              mountPath: /dev/shm\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n        - name: masakarietc\n          emptyDir: {}\n        - name: shm\n          hostPath:\n            path: /dev/shm\n        - name: varrun\n          hostPath:\n            path: /var/run\n        - name: run\n          hostPath:\n            path: /run\n        - name: masakari-bin\n          configMap:\n            name: masakari-bin\n            defaultMode: 0555\n        - name: masakari-etc\n          secret:\n            secretName: masakari-etc\n            defaultMode: 0444\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/daemonset-process-monitor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.process_monitor }}\n{{- $envAll := . }}\n{{- $daemonset := \"masakari-process-monitor\" }}\n\n{{- $mounts_masakari_process_monitor := .Values.pod.mounts.masakari_process_monitor.masakari_process_monitor }}\n{{- $mounts_masakari_process_monitor_init := .Values.pod.mounts.masakari_process_monitor.init_container }}\n\n{{- $serviceAccountName := \"masakari-process-monitor\" }}\n{{- tuple $envAll \"masakari_process_monitor\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: masakari-process-monitor\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll $daemonset | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{- dict \"envAll\" $envAll \"podName\" \"masakari-process-monitor\" \"containerNames\" (list \"masakari-monitor\") | include 
\"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"masakari_process_monitor\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"masakari-process-monitor\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.monitors.node_selector_key }}: {{ .Values.labels.monitors.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"masakari_process_monitor\" $mounts_masakari_process_monitor_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: process-monitor-init\n{{ tuple $envAll \"masakari_process_monitor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.masakari_process_monitor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"masakari\" \"container\" \"masakari_process_monitor\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/masakari-monitors-init.sh\n          env:\n            - name: COMPUTE_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: masakari-bin\n              mountPath: /tmp/masakari-monitors-init.sh\n              
subPath: masakari-monitors-init.sh\n              readOnly: true\n      containers:\n        - name: masakari-process-monitor\n{{ tuple $envAll \"masakari_process_monitor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.masakari_process_monitor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"masakari\" \"container\" \"masakari_process_monitor\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/masakari-process-monitor.sh\n            - start\n          env:\n            - name: COMPUTE_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/masakari-process-monitor.sh\n                  - stop\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.masakari.oslo_concurrency.lock_path }}\n            - name: etcmasakari\n              mountPath: /etc/masakari\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: masakari-bin\n              mountPath: /tmp/masakari-process-monitor.sh\n              subPath: masakari-process-monitor.sh\n            - name: masakari-etc\n              mountPath: /etc/masakari/masakarimonitors.conf\n              subPath: masakarimonitors.conf\n            - name: varrun\n              mountPath: /var/run\n            - name: run\n              mountPath: /run\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: etcmasakari\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n        - name: varrun\n          
hostPath:\n            path: /var/run\n        - name: run\n          hostPath:\n            path: /run\n        - name: masakari-bin\n          configMap:\n            name: masakari-bin\n            defaultMode: 0555\n        - name: masakari-etc\n          secret:\n            secretName: masakari-etc\n            defaultMode: 0444\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"masakariApiLivenessProbeTemplate\" }}\nhttpGet:\n  scheme: {{ tuple \"instance_ha\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: /\n  port: {{ tuple \"instance_ha\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- define \"masakariApiReadinessProbeTemplate\" }}\nhttpGet:\n  scheme: {{ tuple \"instance_ha\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: /\n  port: {{ tuple \"instance_ha\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . 
}}\n\n{{- $mounts_masakari_api := .Values.pod.mounts.masakari_api.masakari_api }}\n{{- $mounts_masakari_api_init := .Values.pod.mounts.masakari_api.init_container }}\n\n{{- $serviceAccountName := \"masakari-api\" }}\n{{- tuple $envAll \"masakari_api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: masakari-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"masakari\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.masakari_api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"masakari\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"masakari\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"masakari_api\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"masakari-api\" \"containerNames\" (list \"masakari-api-init\" \"masakari-api\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"masakari\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"masakari\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.masakari.node_selector_key }}: {{ .Values.labels.masakari.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.masakari_api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"masakari_api\" $mounts_masakari_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: masakari-api\n{{ tuple $envAll \"masakari_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.masakari_api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"masakari\" \"container\" \"masakari_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/masakari-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/masakari-api.sh\n                  - stop\n          ports:\n            - name: n-api\n              containerPort: {{ tuple \"instance_ha\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" $envAll \"component\" \"masakari\" \"container\" \"default\" \"type\" \"liveness\" \"probeTemplate\" (include \"masakariApiLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"masakari\" \"container\" \"default\" \"type\" \"readiness\" \"probeTemplate\" (include \"masakariApiReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.masakari.oslo_concurrency.lock_path }}\n            - name: masakari-bin\n              mountPath: /tmp/masakari-api.sh\n              subPath: masakari-api.sh\n            - name: etcmasakari\n              mountPath: /etc/masakari\n            - name: masakari-etc\n              mountPath: /etc/masakari/masakari.conf\n              subPath: masakari.conf\n            - name: masakari-etc\n              mountPath: /etc/masakari/api-paste.ini\n              subPath: api-paste.ini\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: etcmasakari\n          emptyDir: {}\n        - name: masakari-bin\n          configMap:\n            name: masakari-bin\n            defaultMode: 0555\n        - name: masakari-etc\n          secret:\n            secretName: masakari-etc\n            defaultMode: 0444\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/deployment-engine.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_engine }}\n{{- $envAll := . }}\n\n{{- $mounts_masakari_engine := .Values.pod.mounts.masakari_engine.masakari_engine }}\n{{- $mounts_masakari_engine_init := .Values.pod.mounts.masakari_engine.init_container }}\n\n{{- $serviceAccountName := \"masakari-engine\" }}\n{{- tuple $envAll \"masakari_engine\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: masakari-engine\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"masakari\" \"engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.masakari_engine }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"masakari\" \"engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"masakari\" \"engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"masakari_engine\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"masakari-engine\" \"containerNames\" (list \"masakari-engine-init\" \"masakari-engine\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"masakari-engine\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"masakari\" \"engine\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.masakari.node_selector_key }}: {{ .Values.labels.masakari.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.masakari_engine.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"masakari_engine\" $mounts_masakari_engine_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: masakari-engine\n{{ tuple $envAll \"masakari_engine\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.masakari_engine | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"masakari\" \"container\" \"masakari_engine\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/masakari-engine.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/masakari-engine.sh\n                  - stop\n          
volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.masakari.oslo_concurrency.lock_path }}\n            - name: masakari-bin\n              mountPath: /tmp/masakari-engine.sh\n              subPath: masakari-engine.sh\n              readOnly: true\n            - name: etcmasakari\n              mountPath: /etc/masakari\n            - name: masakari-etc\n              mountPath: /etc/masakari/masakari.conf\n              subPath: masakari.conf\n{{ if $mounts_masakari_engine.volumeMounts }}{{ toYaml $mounts_masakari_engine.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: etcmasakari\n          emptyDir: {}\n        - name: masakari-bin\n          configMap:\n            name: masakari-bin\n            defaultMode: 0555\n        - name: masakari-etc\n          secret:\n            secretName: masakari-etc\n            defaultMode: 0444\n{{ if $mounts_masakari_engine.volumes}}{{ toYaml $mounts_masakari_engine.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "masakari/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbToDrop := dict \"inputType\" \"secret\" \"adminSecret\" .Values.secrets.oslo_db.admin \"userSecret\" .Values.secrets.oslo_db.masakari -}}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"masakari\" \"dbToDrop\" $dbToDrop -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"masakari\" -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml ) }}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"masakari-db-sync\" }}\n{{ tuple $envAll \"db_sync\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: masakari-db-sync\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n    \"helm.sh/hook\": \"post-install,post-upgrade\"\n    \"helm.sh/hook-weight\": \"-4\"\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"masakari\" \"db-migrate\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"db_migrate\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: masakari-db-sync\n{{ tuple $envAll \"db_sync\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_sync | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll 
\"application\" \"masakari\" \"container\" \"masakari_db_migrate\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/manage-db.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: masakari-bin\n              mountPath: /tmp/manage-db.sh\n              subPath: manage-db.sh\n            - name: etcmasakari\n              mountPath: /etc/masakari\n            - name: masakari-etc\n              mountPath: /etc/masakari/masakari.conf\n              subPath: masakari.conf\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: etcmasakari\n          emptyDir: {}\n        - name: masakari-etc\n          secret:\n            secretName: masakari-etc\n            defaultMode: 0444\n        - name: masakari-bin\n          configMap:\n            name: masakari-bin\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"masakari\" \"serviceTypes\" ( tuple \"instance-ha\" ) -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml ) }}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"masakari\" \"serviceTypes\" ( tuple \"instance-ha\" ) -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml ) }}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"masakari\" -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml ) }}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/job-rabbitmq-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"masakari\" -}}\n{{- $_ := set $rmqUserJob \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) }}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: masakari-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.masakari_api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"masakari\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"masakari\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"masakari\" \"test\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- $rabbitmqProtocol := \"http\" }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- $rabbitmqProtocol = \"https\" }}\n{{- end }}\n{{- range $key1, $userClass := tuple \"admin\" \"masakari\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass $rabbitmqProtocol $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n  TRANSPORT_URL: {{ tuple \"oslo_messaging\" \"internal\" $userClass \"amqp\" $envAll | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "masakari/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"instance_ha\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: n-api\n    port: {{ tuple \"instance_ha\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.masakari_api.node_port.enabled }}\n    nodePort: {{ .Values.network.masakari_api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"masakari\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.masakari_api.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.masakari_api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "masakari/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_sync: quay.io/airshipit/masakari:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    masakari_api: quay.io/airshipit/masakari:2025.1-ubuntu_noble\n    masakari_engine: quay.io/airshipit/masakari:2025.1-ubuntu_noble\n    masakari_host_monitor: quay.io/airshipit/masakari-monitors:2025.1-ubuntu_noble\n    masakari_process_monitor: quay.io/airshipit/masakari-monitors:2025.1-ubuntu_noble\n    masakari_instance_monitor: quay.io/airshipit/masakari-monitors:2025.1-ubuntu_noble\n    masakari_introspective_instance_monitor: quay.io/airshipit/masakari-monitors:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  masakari:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  monitors:\n    node_selector_key: 
openstack-compute-node\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      masakari:\n        username: masakari\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  instance_ha:\n    name: masakari\n    hosts:\n      default: masakari-api\n      public: masakari-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: \"/v1/%(tenant_id)s\"\n    scheme:\n      default: \"http\"\n    port:\n      api:\n        default: 15868\n        public: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      masakari:\n        username: masakari\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /masakari\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      masakari:\n        role: admin\n        region_name: RegionOne\n        username: masakari\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n 
     test:\n        role: admin\n        region_name: RegionOne\n        username: masakari-test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n        secret:\n          tls:\n            internal: rabbitmq-tls-direct\n      masakari:\n        username: masakari\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /masakari\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: \"http\"\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress\n  # They are used to enable the Egress K8s network policy.\n  kube_dns:\n    namespace: kube-system\n    name: 
kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns:\n        default: 53\n        protocol: UDP\n  ingress:\n    namespace: null\n    name: ingress\n    hosts:\n      default: ingress\n    port:\n      ingress:\n        default: 80\n\nsecrets:\n  identity:\n    admin: masakari-keystone-admin\n    masakari: masakari-keystone-user\n    test: masakari-keystone-test\n  oslo_db:\n    admin: masakari-db-admin\n    masakari: masakari-db-user\n  oslo_messaging:\n    admin: masakari-rabbitmq-admin\n    masakari: masakari-rabbitmq-user\n  oci_image_registry:\n    masakari: masakari-oci-image-registry\n\ndependencies:\n  static:\n    masakari_api:\n      jobs:\n        - masakari-db-sync\n        - masakari-ks-user\n        - masakari-ks-endpoints\n        - masakari-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    masakari_engine:\n      jobs:\n        - masakari-db-sync\n        - masakari-ks-user\n        - masakari-ks-endpoints\n        - masakari-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - masakari-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    ks_endpoints:\n      jobs:\n        - masakari-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n\npod:\n  security_context:\n    masakari:\n      pod:\n        runAsUser: 42424\n      container:\n        masakari_api:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: false\n          runAsUser: 42424\n        
masakari_engine:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: false\n          runAsUser: 42424\n        masakari_db_sync:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: false\n          runAsUser: 42424\n        masakari_host_monitor:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: true\n          runAsUser: 42424\n        masakari_process_monitor:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: false\n          runAsUser: 42424\n        masakari_instance_monitor:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: false\n          runAsUser: 0\n        masakari_introspective_instance_monitor:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: false\n          runAsUser: 0\n    test:\n      pod:\n        runAsUser: 42424\n      container:\n        horizon_test:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  probes:\n    rpc_timeout: 60\n    rpc_retries: 2\n    masakari:\n      default:\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 120\n            periodSeconds: 90\n            timeoutSeconds: 70\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 80\n            periodSeconds: 90\n            timeoutSeconds: 70\n    masakari-engine:\n      default:\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 60\n            timeoutSeconds: 15\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 60\n            timeoutSeconds: 15\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        
default: 10\n  replicas:\n    masakari_api: 1\n    masakari_engine: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        compute:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n    disruption_budget:\n      masakari_api:\n        min_available: 0\n      masakari_engine:\n        min_available: 0\n    termination_grace_period:\n      masakari_api:\n        timeout: 30\n      masakari_engine:\n        timeout: 30\n  mounts:\n    masakari_api:\n      init_container: null\n      masakari_api:\n        volumeMounts:\n        volumes:\n    masakari_engine:\n      init_container: null\n      masakari_engine:\n        volumeMounts:\n        volumes:\n    masakari_instance_monitor:\n      init_container: null\n      masakari_instance_monitor:\n        volumeMounts:\n        volumes:\n    masakari_host_monitor:\n      init_container: null\n      masakari_host_monitor:\n        volumeMounts:\n        volumes:\n    masakari_process_monitor:\n      init_container: null\n      masakari_process_monitor:\n        volumeMounts:\n        volumes:\n    masakari_introspective_instance_monitor:\n      init_container: null\n      masakari_introspective_instance_monitor:\n        volumeMounts:\n        volumes:\n    masakari_db_sync:\n      masakari_db_sync:\n        volumeMounts:\n        volumes:\n    masakari_db_init:\n      masakari_db_sync:\n        volumeMounts:\n        volumes:\n    masakari_ks_users:\n      masakari_db_sync:\n        volumeMounts:\n        volumes:\n    masakari_ks_service:\n      masakari_db_sync:\n        volumeMounts:\n        volumes:\n  resources:\n    enabled: false\n    masakari_api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: 
\"1024Mi\"\n        cpu: \"2000m\"\n    masakari_engine:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    masakari_host_monitor:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    masakari_instance_monitor:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    masakari_process_monitor:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    masakari_introspective_instance_monitor:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n    
      cpu: \"2000m\"\nconf:\n  paste:\n    composite:masakari_api:\n      use: call:masakari.api.urlmap:urlmap_factory\n      /: apiversions\n      /v1: masakari_api_v1\n    composite:masakari_api_v1:\n      use: call:masakari.api.auth:pipeline_factory_v1\n      keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit authtoken keystonecontext osapi_masakari_app_v1\n      noauth2: cors http_proxy_to_wsgi request_id faultwrap sizelimit noauth2 osapi_masakari_app_v1\n    filter:cors:\n      paste.filter_factory: oslo_middleware.cors:filter_factory\n      oslo_config_project: masakari\n    filter:http_proxy_to_wsgi:\n      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory\n    filter:request_id:\n      paste.filter_factory: oslo_middleware:RequestId.factory\n    filter:faultwrap:\n      paste.filter_factory: masakari.api.openstack:FaultWrapper.factory\n    filter:sizelimit:\n      paste.filter_factory: oslo_middleware:RequestBodySizeLimiter.factory\n    filter:authtoken:\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n    filter:keystonecontext:\n      paste.filter_factory: masakari.api.auth:MasakariKeystoneContext.factory\n    filter:noauth2:\n      paste.filter_factory: masakari.api.auth:NoAuthMiddleware.factory\n    app:osapi_masakari_app_v1:\n      paste.app_factory: masakari.api.openstack.ha:APIRouterV1.factory\n    pipeline:apiversions:\n      pipeline: faultwrap http_proxy_to_wsgi apiversionsapp\n    app:apiversionsapp:\n      paste.app_factory: masakari.api.openstack.ha.versions:Versions.factory\n  masakari:\n    DEFAULT:\n      auth_strategy: keystone\n      duplicate_notification_detection_interval: 180\n      host_failure_recovery_threads: 1\n      masakari_api_workers: 1\n      graceful_shutdown_timeout: 5\n    keystone_authtoken:\n      auth_type: password\n      service_type: instance-ha\n    database:\n      max_retries: -1\n      # -- Database connection URI. 
When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    oslo_concurrency:\n      lock_path: /var/lock\n    # Connection string is evaluated though the endpoints for taskflow.\n    taskflow:\n      # -- Taskflow persistence connection URI. When empty the URI is\n      ## auto-generated from endpoints.oslo_db. Set to null to disable\n      ## auto-generation.\n      connection: \"\"\n    wsgi:\n      api_paste_config: /etc/masakari/api-paste.ini\n  masakarimonitors:\n    DEFAULT:\n      debug: False\n    api:\n      api_version: v1\n      api_interface: internal\n    callback:\n      retry_max: 10\n      retry_interval: 10\n    introspectiveinstancemonitor:\n      guest_monitor_interval: 10\n      guest_monitor_timeout: 5\n    host:\n      monitoring_driver: default\n      monitoring_interval: 120\n      monitoring_samples: 1\n      disable_ipmi_checks: true\n      corosync_multicast_ports: 5405\n      pacemaker_node_type: remote\n  masakari_sudoers: |\n    Defaults secure_path=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin\"\n    masakari-monitors ALL=(ALL:ALL) NOPASSWD: /var/lib/openstack/bin/privsep-helper\n\nnetwork:\n  masakari_api:\n    node_port:\n      enabled: false\n      port: 33033\n    external_policy_local: false\n\nmanifests:\n  job_ks_user: true\n  job_db_sync: true\n  job_db_init: true\n  job_db_drop: false\n  job_ks_endpoints: true\n  job_ks_service: true\n  deployment_api: true\n  deployment_engine: true\n  configmap_bin: true\n  configmap_etc: true\n  secret_db: true\n  secret_rabbitmq: true\n  secret_keystone: true\n  secret_registry: true\n  job_rabbit_init: true\n  service_api: true\n  pdb_api: true\n  # Host Monitors in containers needs pacemaker remote.\n  
host_monitor: false\n  instance_monitor: false\n  process_monitor: false\n  introspective_instance_monitor: false\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "memcached/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.5.5\ndescription: OpenStack-Helm Memcached\nname: memcached\nversion: 2025.2.0\nhome: https://github.com/memcached/memcached\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "memcached/templates/bin/_memcached.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nmemcached --version\nexec memcached -v \\\n  -p ${MEMCACHED_PORT} \\\n  -U 0 \\\n{{- if not .Values.conf.memcached.stats_cachedump.enabled }}\n  -X \\\n{{- end }}\n  -c ${MEMCACHED_MAX_CONNECTIONS} \\\n  -m ${MEMCACHED_MEMORY}\n"
  },
  {
    "path": "memcached/templates/configmap-apparmor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- dict \"envAll\" . \"component\" \"memcached\" | include \"helm-toolkit.snippets.kubernetes_apparmor_configmap\" }}\n"
  },
  {
    "path": "memcached/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $configMapBinName := printf \"%s-%s\" $envAll.deployment_name \"memcached-bin\"  }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ $configMapBinName }}\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  memcached.sh: |\n{{ tuple \"bin/_memcached.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "memcached/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "memcached/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"memcached\" -}}\n{{- if .Values.pod.tolerations.memcached.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "memcached/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"memcached\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "memcached/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "memcached/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"oslo_cache\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  sessionAffinity: ClientIP\n  ports:\n    - name: memcache\n      port: {{ tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{- with .Values.memcached.extraServicePorts }}\n      {{- tpl (toYaml .) $ | nindent 4 }}\n    {{- end }}\n  selector:\n{{ tuple $envAll \"memcached\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{ .Values.network.memcached | include \"helm-toolkit.snippets.service_params\" | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "memcached/templates/statefulset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- define \"memcachedProbeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- if .Values.manifests.statefulset }}\n{{- $envAll := . }}\n\n{{- $rcControllerName := printf \"%s-%s\" $envAll.deployment_name \"memcached\" }}\n{{- $configMapBinName := printf \"%s-%s\" $envAll.deployment_name \"memcached-bin\" }}\n\n{{ tuple $envAll \"memcached\" $rcControllerName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: {{ $rcControllerName | quote }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"memcached\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  podManagementPolicy: Parallel\n  replicas: {{ .Values.pod.replicas.server }}\n  serviceName: \"{{ tuple \"oslo_cache\" \"internal\" . 
| include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\"\n  selector:\n    matchLabels:\n{{ tuple $envAll \"memcached\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"memcached\" \"containerNames\" (list \"init\" \"memcached\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n      labels:\n{{ tuple $envAll \"memcached\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      shareProcessNamespace: true\n      serviceAccountName: {{ $rcControllerName | quote }}\n      affinity:\n{{ tuple $envAll \"memcached\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.server.node_selector_key }}: {{ .Values.labels.server.node_selector_value | quote }}\n{{ if $envAll.Values.pod.tolerations.memcached.enabled }}\n{{ tuple $envAll \"memcached\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.memcached.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"memcached\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n{{ dict \"envAll\" $envAll | include \"helm-toolkit.snippets.kubernetes_apparmor_loader_init_container\" | indent 8 }}\n      containers:\n        - name: memcached\n{{ tuple $envAll \"memcached\" | include \"helm-toolkit.snippets.image\" | 
indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"memcached\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: MEMCACHED_PORT\n              value: {{ tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: MEMCACHED_MAX_CONNECTIONS\n              value: {{ .Values.conf.memcached.max_connections | quote }}\n            - name: MEMCACHED_MEMORY\n              value: {{ .Values.conf.memcached.memory | quote }}\n          command:\n            - /tmp/memcached.sh\n          ports:\n            - containerPort: {{ tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" $envAll \"component\" \"memcached\" \"container\" \"memcached\" \"type\" \"readiness\" \"probeTemplate\" (include \"memcachedProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"memcached\" \"container\" \"memcached\" \"type\" \"liveness\" \"probeTemplate\" (include \"memcachedProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: memcached-bin\n              mountPath: /tmp/memcached.sh\n              subPath: memcached.sh\n              readOnly: true\n        {{- with .Values.memcached.extraContainers }}\n          {{- tpl (toYaml .) 
$ | nindent 8 }}\n        {{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: memcached-bin\n          configMap:\n            name: {{ $configMapBinName | quote }}\n            defaultMode: 360\n{{ dict \"envAll\" $envAll \"component\" \"memcached\" \"requireSys\" true | include \"helm-toolkit.snippets.kubernetes_apparmor_volumes\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "memcached/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for memcached.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nconf:\n  memcached:\n    max_connections: 8192\n    # NOTE(pordirect): this should match the value in\n    # `pod.resources.memcached.memory`\n    memory: 1024\n    stats_cachedump:\n      enabled: true\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - memcached-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    memcached:\n      jobs: null\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nsecrets:\n  oci_image_registry:\n    memcached: memcached-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      memcached:\n        username: memcached\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    
port:\n      registry:\n        default: null\n  oslo_cache:\n    namespace: null\n    host_fqdn_override:\n      default: null\n    hosts:\n      default: memcached\n    port:\n      memcache:\n        default: 11211\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns_tcp:\n        default: 53\n      dns:\n        default: 53\n        protocol: UDP\n\nnetwork:\n  memcached: {}\n\nnetwork_policy:\n  memcached:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy'\n    memcached: 'quay.io/airshipit/memcached:1.6.32'\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  server:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nmanifests:\n  configmap_bin: true\n  statefulset: true\n  job_image_repo_sync: true\n  network_policy: false\n  service: true\n  secret_registry: true\n\nmemcached:\n  extraContainers: []\n  extraServicePorts: []\n\npod:\n  security_context:\n    server:\n      pod:\n        runAsUser: 65534\n        runAsNonRoot: true\n        fsGroup: 65534\n      container:\n        memcached:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n          capabilities:\n            drop:\n              - ALL\n\n  probes:\n    memcached:\n      memcached:\n        readiness:\n          enabled: True\n          params:\n            initialDelaySeconds: 0\n            periodSeconds: 10\n            timeoutSeconds: 5\n        liveness:\n          enabled: True\n          params:\n            initialDelaySeconds: 10\n            periodSeconds: 15\n            timeoutSeconds: 10\n\n  affinity:\n    
anti:\n      topologyKey:\n        default: kubernetes.io/hostname\n      type:\n        default: requiredDuringSchedulingIgnoredDuringExecution\n      weight:\n        default: 10\n  tolerations:\n    memcached:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  lifecycle:\n    upgrades:\n      deployments:\n        pod_replacement_strategy: RollingUpdate\n        revision_history: 3\n        rolling_update:\n          max_unavailable: 1\n    termination_grace_period:\n      memcached:\n        timeout: 30\n  replicas:\n    server: 1\n  resources:\n    enabled: false\n    memcached:\n      limits:\n        cpu: \"2000m\"\n        memory: \"1024Mi\"\n      requests:\n        cpu: \"500m\"\n        memory: \"128Mi\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         
app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "mistral/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Mistral\nname: mistral\nversion: 2025.2.0\nhome: https://docs.openstack.org/mistral/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Mistral/OpenStack_Project_Mistral_vertical.png\nsources:\n  - https://opendev.org/openstack/mistral\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "mistral/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "mistral/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nmistral-db-manage --config-file /etc/mistral/mistral.conf upgrade head\nmistral-db-manage --config-file /etc/mistral/mistral.conf populate\n"
  },
  {
    "path": "mistral/templates/bin/_mistral-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec mistral-server \\\n        --server api \\\n        --config-file /etc/mistral/mistral.conf\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "mistral/templates/bin/_mistral-engine.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexec mistral-server \\\n      --server engine \\\n      --config-file /etc/mistral/mistral.conf\n"
  },
  {
    "path": "mistral/templates/bin/_mistral-event-engine.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexec mistral-server \\\n      --server event-engine \\\n      --config-file /etc/mistral/mistral.conf\n"
  },
  {
    "path": "mistral/templates/bin/_mistral-executor.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexec mistral-server \\\n      --server executor \\\n      --config-file /etc/mistral/mistral.conf\n"
  },
  {
    "path": "mistral/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $rallyTests := .Values.conf.rally_tests }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: mistral-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  rally-test.sh: |\n{{ tuple $rallyTests | include \"helm-toolkit.scripts.rally_test\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  mistral-api.sh: |\n{{ tuple \"bin/_mistral-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  mistral-engine.sh: |\n{{ tuple \"bin/_mistral-engine.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  mistral-event-engine.sh: |\n{{ tuple \"bin/_mistral-event-engine.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  mistral-executor.sh: |\n{{ tuple \"bin/_mistral-executor.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.mistral.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.mistral.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.mistral.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.mistral.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.mistral.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.mistral.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.mistral.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.mistral.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.mistral.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.mistral.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.mistral.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.mistral.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.mistral.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.mistral.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.mistral.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.mistral.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.mistral.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.mistral.keystone_authtoken \"username\" .Values.endpoints.identity.auth.mistral.username -}}\n{{- end -}}\n{{- if empty .Values.conf.mistral.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.mistral.keystone_authtoken \"password\" .Values.endpoints.identity.auth.mistral.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.mistral.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.mistral.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.mistral.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.mistral.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.mistral.database.connection)) (empty .Values.conf.mistral.database.connection) -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"mistral\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\"| set .Values.conf.mistral.database \"connection\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.mistral.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"mistral\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.mistral.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.mistral.api.port -}}\n{{- $_ := tuple \"workflowv2\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.mistral.api \"port\" -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .Release.Name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: mistral-etc\ntype: Opaque\ndata:\n  rally_tests.yaml: {{ toYaml .Values.conf.rally_tests.tests | b64enc }}\n  mistral.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.mistral | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n{{- range $key, $value := $envAll.Values.conf.rally_tests.templates }}\n  {{ printf \"test_template_%d\" $key }}: {{ $value.template | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . }}\n\n{{- $mounts_mistral_api := .Values.pod.mounts.mistral_api.mistral_api }}\n{{- $mounts_mistral_api_init := .Values.pod.mounts.mistral_api.init_container }}\n\n{{- $serviceAccountName := \"mistral-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mistral-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"mistral\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"mistral\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"mistral\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"mistral_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"mistral\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_mistral_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: mistral-api\n{{ tuple $envAll \"mistral_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            runAsUser: {{ .Values.pod.user.mistral.uid }}\n          command:\n            - /tmp/mistral-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/mistral-api.sh\n                  - stop\n          ports:\n            - name: w-api\n              containerPort: {{ tuple \"workflowv2\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            httpGet:\n              scheme: {{ tuple \"workflowv2\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n              path: /\n              port: {{ tuple \"workflowv2\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.mistral.oslo_concurrency.lock_path }}\n            - name: pod-etc-mistral\n              mountPath: /etc/mistral\n            - name: mistral-bin\n              mountPath: /tmp/mistral-api.sh\n              subPath: mistral-api.sh\n              readOnly: true\n            - name: mistral-etc\n              mountPath: /etc/mistral/mistral.conf\n              subPath: mistral.conf\n              readOnly: true\n            {{- if .Values.conf.mistral.DEFAULT.log_config_append }}\n            - name: mistral-etc\n              mountPath: {{ .Values.conf.mistral.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.mistral.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: mistral-etc\n              mountPath: /etc/mistral/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{ if $mounts_mistral_api.volumeMounts }}{{ toYaml $mounts_mistral_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-mistral\n          emptyDir: {}\n        - name: mistral-bin\n          configMap:\n            name: mistral-bin\n            defaultMode: 0555\n        - name: mistral-etc\n          secret:\n            secretName: mistral-etc\n            defaultMode: 0444\n{{ if $mounts_mistral_api.volumes }}{{ toYaml $mounts_mistral_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/deployment-executor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_executor }}\n{{- $envAll := . }}\n\n{{- $mounts_mistral_executor := .Values.pod.mounts.mistral_executor.mistral_executor }}\n{{- $mounts_mistral_executor_init := .Values.pod.mounts.mistral_executor.init_container }}\n\n{{- $serviceAccountName := \"mistral-executor\" }}\n{{ tuple $envAll \"executor\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mistral-executor\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"mistral\" \"executor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.executor }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"mistral\" \"executor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"mistral\" \"executor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"mistral_executor\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"mistral\" \"executor\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.executor.node_selector_key }}: {{ .Values.labels.executor.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"executor\" $mounts_mistral_executor_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: mistral-executor\n{{ tuple $envAll \"mistral_executor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.executor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            runAsUser: {{ .Values.pod.user.mistral.uid }}\n          command:\n            - /tmp/mistral-executor.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.mistral.oslo_concurrency.lock_path }}\n            - name: pod-etc-mistral\n              mountPath: /etc/mistral\n            - name: mistral-bin\n              mountPath: /tmp/mistral-executor.sh\n              subPath: mistral-executor.sh\n              readOnly: true\n            - name: mistral-etc\n              mountPath: /etc/mistral/mistral.conf\n              subPath: mistral.conf\n              readOnly: true\n            {{- if .Values.conf.mistral.DEFAULT.log_config_append }}\n            - name: mistral-etc\n              mountPath: {{ .Values.conf.mistral.DEFAULT.log_config_append }}\n              subPath: {{ base 
.Values.conf.mistral.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{ if $mounts_mistral_executor.volumeMounts }}{{ toYaml $mounts_mistral_executor.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-mistral\n          emptyDir: {}\n        - name: mistral-bin\n          configMap:\n            name: mistral-bin\n            defaultMode: 0555\n        - name: mistral-etc\n          secret:\n            secretName: mistral-etc\n            defaultMode: 0444\n{{ if $mounts_mistral_executor.volumes }}{{ toYaml $mounts_mistral_executor.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "mistral/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendServiceType\" \"workflowv2\" \"backendPort\" \"w-api\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.bootstrap\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"5\"\n{{- end }}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"mistral\" \"keystoneUser\" .Values.bootstrap.ks_user \"logConfigFile\" .Values.conf.mistral.DEFAULT.log_config_append \"jobAnnotations\" (include \"metadata.annotations.job.bootstrap\" . | fromYaml) -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"mistral\" -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"mistral\" \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"mistral\" \"podVolMounts\" .Values.pod.mounts.mistral_db_sync.mistral_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.mistral_db_sync.mistral_db_sync.volumes \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"mistral\" \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"mistral\" \"serviceTypes\" ( tuple \"workflowv2\" ) \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"mistral\" \"serviceTypes\" ( tuple \"workflowv2\" ) \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"mistral\" \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"mistral\" \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/network_policy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"mistral\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "mistral/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: mistral-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"mistral\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/pod-rally-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pod_rally_test }}\n{{- $envAll := . }}\n\n{{- $mounts_tests := .Values.pod.mounts.mistral_tests.mistral_tests }}\n{{- $mounts_tests_init := .Values.pod.mounts.mistral_tests.init_container }}\n\n{{- $serviceAccountName := print $envAll.Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ print $envAll.Release.Name \"-test\" }}\n  labels:\n{{ tuple $envAll \"mistral\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  restartPolicy: Never\n  serviceAccountName: {{ $serviceAccountName }}\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n    - name: {{ .Release.Name }}-test-ks-user\n{{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      command:\n        - /tmp/ks-user.sh\n      volumeMounts:\n  
      - name: pod-tmp\n          mountPath: /tmp\n        - name: mistral-bin\n          mountPath: /tmp/ks-user.sh\n          subPath: ks-user.sh\n          readOnly: true\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_SERVICE_NAME\n          value: \"test\"\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_ROLE\n          value: {{ .Values.endpoints.identity.auth.test.role | quote }}\n  containers:\n    - name: {{ .Release.Name }}-test\n{{ tuple $envAll \"test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: RALLY_ENV_NAME\n          value: {{.Release.Name}}\n      command:\n        - /tmp/rally-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: mistral-etc\n          mountPath: /etc/rally/rally_tests.yaml\n          subPath: rally_tests.yaml\n          readOnly: true\n        - name: mistral-bin\n          mountPath: /tmp/rally-test.sh\n          subPath: rally-test.sh\n          readOnly: true\n        - name: rally-db\n          mountPath: /var/lib/rally\n        {{- range $key, $value := $envAll.Values.conf.rally_tests.templates }}\n        - name: mistral-etc\n          mountPath: {{ $value.name }}\n       
   subPath: {{ printf \"test_template_%d\" $key }}\n          readOnly: true\n        {{- end }}\n{{ if $mounts_tests.volumeMounts }}{{ toYaml $mounts_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: mistral-etc\n      secret:\n        secretName: mistral-etc\n        defaultMode: 0444\n    - name: mistral-bin\n      configMap:\n        name: mistral-bin\n        defaultMode: 0555\n    - name: rally-db\n      emptyDir: {}\n{{ if $mounts_tests.volumes }}{{ toYaml $mounts_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"mistral\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  DB_CONNECTION: {{ tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"mistral\" \"test\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"mistral\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass \"http\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"workflowv2\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: w-api\n      port: {{ tuple \"workflowv2\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"mistral\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"workflowv2\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/statefulset-engine.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.statefulset_engine }}\n{{- $envAll := . }}\n\n{{- $mounts_mistral_engine := .Values.pod.mounts.mistral_engine.mistral_engine }}\n{{- $mounts_mistral_engine_init := .Values.pod.mounts.mistral_engine.init_container }}\n\n{{- $serviceAccountName := \"mistral-engine\" }}\n{{ tuple $envAll \"engine\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: mistral-engine\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"mistral\" \"engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: mistral-engine\n  replicas: {{ .Values.pod.replicas.engine }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"mistral\" \"engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"mistral\" \"engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"mistral\" \"engine\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      
nodeSelector:\n        {{ .Values.labels.engine.node_selector_key }}: {{ .Values.labels.engine.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"engine\" $mounts_mistral_engine_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: mistral-engine\n{{ tuple $envAll \"mistral_engine\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.engine | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            runAsUser: {{ .Values.pod.user.mistral.uid }}\n          command:\n            - /tmp/mistral-engine.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.mistral.oslo_concurrency.lock_path }}\n            - name: pod-etc-mistral\n              mountPath: /etc/mistral\n            - name: mistral-bin\n              mountPath: /tmp/mistral-engine.sh\n              subPath: mistral-engine.sh\n              readOnly: true\n            - name: mistral-etc\n              mountPath: /etc/mistral/mistral.conf\n              subPath: mistral.conf\n              readOnly: true\n            {{- if .Values.conf.mistral.DEFAULT.log_config_append }}\n            - name: mistral-etc\n              mountPath: {{ .Values.conf.mistral.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.mistral.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{ if $mounts_mistral_engine.volumeMounts }}{{ toYaml $mounts_mistral_engine.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-mistral\n          emptyDir: {}\n        - name: mistral-bin\n          configMap:\n            name: mistral-bin\n            
defaultMode: 0555\n        - name: mistral-etc\n          secret:\n            secretName: mistral-etc\n            defaultMode: 0444\n{{ if $mounts_mistral_engine.volumes }}{{ toYaml $mounts_mistral_engine.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "mistral/templates/statefulset-event-engine.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.statefulset_event_engine }}\n{{- $envAll := . }}\n\n{{- $mounts_mistral_event_engine := .Values.pod.mounts.mistral_event_engine.mistral_event_engine }}\n{{- $mounts_mistral_event_engine_init := .Values.pod.mounts.mistral_event_engine.init_container }}\n\n{{- $serviceAccountName := \"mistral-event-engine\" }}\n{{ tuple $envAll \"event_engine\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: mistral-event-engine\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"mistral\" \"event-engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: mistral-event-engine\n  replicas: {{ .Values.pod.replicas.event_engine }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"mistral\" \"event-engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"mistral\" \"event-engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"mistral\" \"event-engine\" | 
include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.event_engine.node_selector_key }}: {{ .Values.labels.event_engine.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"event_engine\" $mounts_mistral_event_engine_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: mistral-event-engine\n{{ tuple $envAll \"mistral_event_engine\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.event_engine | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            runAsUser: {{ .Values.pod.user.mistral.uid }}\n          command:\n            - /tmp/mistral-event-engine.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.mistral.oslo_concurrency.lock_path }}\n            - name: pod-etc-mistral\n              mountPath: /etc/mistral\n            - name: mistral-bin\n              mountPath: /tmp/mistral-event-engine.sh\n              subPath: mistral-event-engine.sh\n              readOnly: true\n            - name: mistral-etc\n              mountPath: /etc/mistral/mistral.conf\n              subPath: mistral.conf\n              readOnly: true\n            {{- if .Values.conf.mistral.DEFAULT.log_config_append }}\n            - name: mistral-etc\n              mountPath: {{ .Values.conf.mistral.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.mistral.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{ if $mounts_mistral_event_engine.volumeMounts }}{{ toYaml $mounts_mistral_event_engine.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n     
   - name: pod-etc-mistral\n          emptyDir: {}\n        - name: mistral-bin\n          configMap:\n            name: mistral-bin\n            defaultMode: 0555\n        - name: mistral-etc\n          secret:\n            secretName: mistral-etc\n            defaultMode: 0444\n{{ if $mounts_mistral_event_engine.volumes }}{{ toYaml $mounts_mistral_event_engine.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "mistral/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for mistral.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  engine:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  event_engine:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  executor:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nrelease_group: null\n\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    mistral_db_sync: quay.io/airshipit/mistral:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: 
quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    mistral_api: quay.io/airshipit/mistral:2025.1-ubuntu_noble\n    mistral_engine: quay.io/airshipit/mistral:2025.1-ubuntu_noble\n    mistral_event_engine: quay.io/airshipit/mistral:2025.1-ubuntu_noble\n    mistral_executor: quay.io/airshipit/mistral:2025.1-ubuntu_noble\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    node_port:\n      enabled: false\n      port: 28989\n\nbootstrap:\n  enabled: false\n  ks_user: mistral\n  script: |\n    openstack token issue\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - mistral-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - mistral-db-sync\n        - mistral-ks-user\n        - mistral-ks-endpoints\n        - mistral-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - mistral-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    engine:\n      jobs:\n        - mistral-db-sync\n        - mistral-ks-user\n        - mistral-ks-endpoints\n        - mistral-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n  
  event_engine:\n      jobs:\n        - mistral-db-sync\n        - mistral-ks-user\n        - mistral-ks-endpoints\n        - mistral-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    executor:\n      jobs:\n        - mistral-db-sync\n        - mistral-ks-user\n        - mistral-ks-endpoints\n        - mistral-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    ks_endpoints:\n      jobs:\n        - mistral-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n        - service: oslo_messaging\n          endpoint: internal\n    tests:\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: workflowv2\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: mistral-keystone-admin\n    mistral: mistral-keystone-user\n    test: mistral-keystone-test\n  oslo_db:\n    admin: mistral-db-admin\n    mistral: mistral-db-user\n  oslo_messaging:\n    admin: mistral-rabbitmq-admin\n    mistral: mistral-rabbitmq-user\n  oci_image_registry:\n    mistral: mistral-oci-image-registry\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: 
localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      mistral:\n        username: mistral\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      mistral:\n        role: admin\n        region_name: RegionOne\n        username: mistral\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      test:\n        role: admin\n        region_name: RegionOne\n        username: mistral-test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  workflowv2:\n    name: mistral\n    hosts:\n      default: mistral-api\n      public: mistral\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v2\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 8989\n        public: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n      mistral:\n        username: mistral\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /mistral\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  
oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n      mistral:\n        username: mistral\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /mistral\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n\nconf:\n  rally_tests:\n    run_tempest: false\n    tests:\n      MistralWorkbooks.create_workbook:\n        - args:\n            definition: /tmp/rally-jobs/mistral_wb.yaml\n            do_delete: true\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      MistralExecutions.create_execution_from_workbook:\n        - args:\n            definition: /tmp/rally-jobs/mistral_wb.yaml\n            do_delete: true\n            params: /tmp/rally-jobs/mistral_params.json\n            wf_input: /tmp/rally-jobs/mistral_input.json\n            workflow_name: wf1\n          runner:\n            concurrency: 1\n            times: 1\n            
type: constant\n          sla:\n            failure_rate:\n              max: 0\n      MistralWorkbooks.list_workbooks:\n        - runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      MistralExecutions.list_executions:\n        - runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n    templates:\n      - name: /tmp/rally-jobs/mistral_wb.yaml\n        template: |\n          version: \"2.0\"\n          name: wb\n          workflows:\n            wf1:\n              type: direct\n              input:\n                - input1: input1\n                - some_json_input: {}\n              tasks:\n                hello:\n                  action: std.echo output=\"Hello\"\n                  publish:\n                      result: $\n      - name: /tmp/rally-jobs/mistral_input.json\n        template: |\n          {\"input1\": \"value1\", \"some_json_input\": {\"a\": \"b\"}}\n      - name: /tmp/rally-jobs/mistral_params.json\n        template: |\n          {\"env\": {\"env_param\": \"env_param_value\"}}\n  policy: {}\n  mistral:\n    DEFAULT:\n      log_config_append: /etc/mistral/logging.conf\n      transport_url: null\n    api:\n      # NOTE(portdirect): the bind port should not be defined, and is manipulated\n      # via the endpoints section.\n      port: null\n      api_workers: 8\n    coordination:\n      backend_url: \"\"\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. 
when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    keystone_authtoken:\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n    oslo_policy:\n      policy_file: /etc/mistral/policy.yaml\n    oslo_concurrency:\n      lock_path: /var/lock\n  logging:\n    loggers:\n      keys:\n        - root\n        - mistral\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_mistral:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: mistral\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n\npod:\n  user:\n    mistral:\n      uid: 1000\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 
10\n  mounts:\n    mistral_api:\n      init_container: null\n      mistral_api:\n        volumeMounts:\n        volumes:\n    mistral_executor:\n      init_container: null\n      mistral_executor:\n        volumeMounts:\n        volumes:\n    mistral_engine:\n      init_container: null\n      mistral_engine:\n        volumeMounts:\n        volumes:\n    mistral_event_engine:\n      init_container: null\n      mistral_event_engine:\n        volumeMounts:\n        volumes:\n    mistral_bootstrap:\n      init_container: null\n      mistral_bootstrap:\n        volumeMounts:\n        volumes:\n    mistral_tests:\n      init_container: null\n      mistral_tests:\n        volumeMounts:\n        volumes:\n    mistral_db_sync:\n      mistral_db_sync:\n        volumeMounts:\n        volumes:\n  replicas:\n    api: 1\n    engine: 1\n    event_engine: 1\n    executor: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    engine:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    event_engine:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    executor:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: 
\"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nnetwork_policy:\n  mistral:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  deployment_api: true\n  deployment_executor: true\n  ingress_api: true\n  job_bootstrap: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_image_repo_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_rabbit_init: true\n  pdb_api: true\n  pod_rally_test: true\n  network_policy: false\n  secret_db: 
true\n  secret_keystone: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_ingress_api: true\n  service_api: true\n  statefulset_engine: true\n  statefulset_event_engine: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "nagios/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Nagios\nname: nagios\nversion: 2025.2.0\nhome: https://www.nagios.org\nsources:\n  - https://opendev.org/openstack/openstack-helm-addons\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "nagios/templates/bin/_apache.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ev\n\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n\n  if [ -f /etc/apache2/envvars ]; then\n     # Loading Apache2 ENV variables\n     source /etc/httpd/apache2/envvars\n  fi\n  # Apache gets grumpy about PID files pre-existing\n  rm -f /etc/httpd/logs/httpd.pid\n\n  if [ -f /usr/local/apache2/conf/.htpasswd ]; then\n    htpasswd -b /usr/local/apache2/conf/.htpasswd \"$NAGIOSADMIN_USER\" \"$NAGIOSADMIN_PASS\"\n  else\n    htpasswd -cb /usr/local/apache2/conf/.htpasswd \"$NAGIOSADMIN_USER\" \"$NAGIOSADMIN_PASS\"\n  fi\n\n  #Launch Apache on Foreground\n  exec httpd -DFOREGROUND\n}\n\nfunction stop () {\n  apachectl -k graceful-stop\n}\n\n$COMMAND\n"
  },
  {
    "path": "nagios/templates/bin/_nagios-readiness.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# NOTE(sw5822): Redirect no-op operator output to Nagios log file to clean out\n# Nagios's log file, since Nagios doesn't support logging to /dev/null\n: > /opt/nagios/var/log/nagios.log\n\n# Check whether Nagios endpoint is reachable\nreply=$(curl -s -o /dev/null -w %{http_code} http://127.0.0.1:8000/nagios)\nif [ \\\"$reply\\\" -lt 200 -o \\\"$reply\\\" -ge 400 ]; then\n  exit 1\nfi\n"
  },
  {
    "path": "nagios/templates/bin/_selenium-tests.py.tpl",
    "content": "#!/usr/bin/env python3\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nimport os\nimport logging\nimport sys\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.chrome.options import Options\n{{- if .Values.selenium_v4 }}\nfrom selenium.webdriver.chrome.service import Service\n{{- end }}\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import ScreenshotException\n\n# Create logger, console handler and formatter\nlogger = logging.getLogger('Nagios Selenium Tests')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\n    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n)\n\n# Set the formatter and add the handler\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\ndef get_variable(env_var):\n    if env_var in os.environ:\n        logger.info('Found \"{}\"'.format(env_var))\n        return os.environ[env_var]\n    else:\n        logger.critical('Variable \"{}\" is not defined!'.format(env_var))\n        sys.exit(1)\n\ndef click_link_by_name(link_name):\n    try:\n        logger.info(\"Clicking '{}' link\".format(link_name))\n{{- if .Values.selenium_v4 }}\n        link = browser.find_element(By.LINK_TEXT, 
link_name)\n{{- else }}\n        link = browser.find_element_by_link_text(link_name)\n{{- end }}\n        link.click()\n    except NoSuchElementException:\n        logger.error(\"Failed clicking '{}' link\".format(link_name))\n        browser.quit()\n        sys.exit(1)\n\ndef take_screenshot(page_name, artifacts_dir='/tmp/artifacts/'):  # nosec\n    file_name = page_name.replace(' ', '_')\n    try:\n        el = WebDriverWait(browser, 15)\n        browser.save_screenshot('{}{}.png'.format(artifacts_dir, file_name))\n        logger.info(\"Successfully captured {} screenshot\".format(page_name))\n    except ScreenshotException:\n        logger.error(\"Failed to capture {} screenshot\".format(page_name))\n        browser.quit()\n        sys.exit(1)\n\nusername = get_variable('NAGIOS_USER')\npassword = get_variable('NAGIOS_PASSWORD')\nnagios_uri = get_variable('NAGIOS_URI')\nnagios_url = 'http://{0}:{1}@{2}'.format(username, password, nagios_uri)\n\nchrome_driver = '/etc/selenium/chromedriver'\noptions = Options()\noptions.add_argument('--headless=new')\noptions.add_argument('--no-sandbox')\noptions.add_argument('--window-size=1920x1080')\n\n{{- if .Values.selenium_v4 }}\nservice = Service(executable_path=chrome_driver)\nbrowser = webdriver.Chrome(service=service, options=options)\n{{- else }}\nbrowser = webdriver.Chrome(chrome_driver, chrome_options=options)\n{{- end }}\n\ntry:\n    logger.info('Attempting to connect to Nagios')\n    browser.get(nagios_url)\n    el = WebDriverWait(browser, 15).until(\n        EC.title_contains('Nagios')\n    )\n    logger.info('Connected to Nagios')\nexcept TimeoutException:\n    logger.critical('Timed out waiting for Nagios')\n    browser.quit()\n    sys.exit(1)\n\ntry:\n    logger.info('Switching Focus to Navigation side frame')\n    sideFrame = browser.switch_to.frame('side')\nexcept NoSuchElementException:\n    logger.error('Failed selecting side frame')\n    browser.quit()\n    sys.exit(1)\n\ntry:\n    logger.info('Attempting to 
visit Services page')\n    click_link_by_name('Services')\n    take_screenshot('Nagios Services')\nexcept TimeoutException:\n    logger.error('Failed to load Services page')\n    browser.quit()\n    sys.exit(1)\n\ntry:\n    logger.info('Attempting to visit Host Groups page')\n    click_link_by_name('Host Groups')\n    take_screenshot('Nagios Host Groups')\nexcept TimeoutException:\n    logger.error('Failed to load Host Groups page')\n    browser.quit()\n    sys.exit(1)\n\ntry:\n    logger.info('Attempting to visit Hosts page')\n    click_link_by_name('Hosts')\n    take_screenshot('Nagios Hosts')\nexcept TimeoutException:\n    logger.error('Failed to load Hosts page')\n    browser.quit()\n    sys.exit(1)\n\nlogger.info(\"The following screenshots were captured:\")\nfor root, dirs, files in os.walk(\"/tmp/artifacts/\"):  # nosec\n    for name in files:\n        logger.info(os.path.join(root, name))\n\nbrowser.quit()\n"
  },
  {
    "path": "nagios/templates/configmap-additional-plugins.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_additional_plugins }}\n{{-   $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: nagios-additional-plugins\ntype: Opaque\ndata:\n{{-   range .Values.conf.nagios.additionalPlugins }}\n  {{ .name }}: {{ .content | b64enc | quote }}\n{{-   end }}\n{{- end }}\n"
  },
  {
    "path": "nagios/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: nagios-bin\ndata:\n  apache.sh: |\n{{ tuple \"bin/_apache.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  nagios-readiness.sh: |\n{{ tuple \"bin/_nagios-readiness.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  selenium-tests.py: |\n{{ tuple \"bin/_selenium-tests.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  image-repo-sync.sh: |+\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "nagios/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: nagios-etc\ntype: Opaque\ndata:\n  {{- if not (empty .Values.conf.nagios.query_es_clauses) }}\n  query_es_clauses.json: {{ .Values.conf.nagios.query_es_clauses | toJson | b64enc }}\n  {{- end }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.nagios.nagios.template \"key\" \"nagios.cfg\" \"format\" \"Secret\") | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.nagios.cgi.template \"key\" \"cgi.cfg\" \"format\" \"Secret\") | indent 2 }}\n{{- range $objectType, $config := $envAll.Values.conf.nagios.objects }}\n{{- $objectFile := printf \"%s.cfg\" $objectType -}}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" $config.template \"key\" $objectFile \"format\" \"Secret\") | indent 2 }}\n{{- end }}\n  # NOTE(portdirect): this must be last, to work round helm ~2.7 bug.\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.httpd \"key\" \"httpd.conf\" \"format\" \"Secret\") | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "nagios/templates/deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"apacheProxyReadinessProbeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"nagios\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- define \"nagiosReadinessProbeTemplate\" }}\nexec:\n  command:\n    - /tmp/nagios-readiness.sh\n{{- end }}\n\n{{- if .Values.manifests.deployment }}\n{{- $envAll := . }}\n\n{{- $nagiosUserSecret := .Values.secrets.nagios.admin }}\n\n{{- $serviceAccountName := \"nagios\" }}\n{{ tuple $envAll \"nagios\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes\n      - nodes/proxy\n      - services\n      - endpoints\n      - pods\n      - pods/exec\n      - persistentvolumes\n      - persistentvolumeclaims\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n    verbs:\n      - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: 
apps/v1\nkind: Deployment\nmetadata:\n  name: nagios\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"nagios\" \"monitoring\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.nagios }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"nagios\" \"monitoring\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nagios\" \"monitoring\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"nagios\" \"containerNames\" (list \"apache-proxy\" \"nagios\" \"init\" \"define-nagios-hosts\")  | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"monitoring\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"nagios\" \"monitoring\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.nagios.node_selector_key }}: {{ .Values.labels.nagios.node_selector_value | quote }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.nagios.timeout | default \"30\" }}\n      {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n      shareProcessNamespace: true\n      {{- else }}\n      hostPID: true\n      {{- end }}\n      initContainers:\n{{ tuple $envAll \"nagios\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: define-nagios-hosts\n{{ tuple $envAll \"nagios\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.nagios | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"monitoring\" \"container\" \"define_nagios_hosts\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /usr/lib/nagios/plugins/define-nagios-hosts.py\n            - --object_file_loc\n            - /opt/nagios/etc/conf.d/nagios-hosts.cfg\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nagios-confd\n              mountPath: 
/opt/nagios/etc/conf.d\n          env:\n{{- if .Values.pod.env }}\n{{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.pod.env | indent 12 }}\n{{- end }}\n      containers:\n        - name: apache-proxy\n{{ tuple $envAll \"apache_proxy\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.apache_proxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"monitoring\" \"container\" \"apache_proxy\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"monitoring\" \"container\" \"apache_proxy\" \"type\" \"readiness\" \"probeTemplate\" (include \"apacheProxyReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/apache.sh\n            - start\n          ports:\n            - name: http\n              containerPort: {{ tuple \"nagios\" \"internal\" \"http\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          env:\n            - name: NAGIOSADMIN_USER\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $nagiosUserSecret }}\n                  key: NAGIOSADMIN_USER\n            - name: NAGIOSADMIN_PASS\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $nagiosUserSecret }}\n                  key: NAGIOSADMIN_PASS\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nagios-bin\n              mountPath: /tmp/apache.sh\n              subPath: apache.sh\n              readOnly: true\n            - name: nagios-etc\n              mountPath: /usr/local/apache2/conf/httpd.conf\n              subPath: httpd.conf\n              readOnly: true\n        - name: nagios\n{{ tuple $envAll \"nagios\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.nagios | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"monitoring\" \"container\" \"nagios\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"monitoring\" \"container\" \"nagios\" \"type\" \"readiness\" \"probeTemplate\" (include \"nagiosReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          ports:\n            - name: nagios\n              containerPort: {{ tuple \"nagios\" \"internal\" \"nagios\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          env:\n{{- if .Values.pod.env }}\n{{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.pod.env | indent 12 }}\n{{- end }}\n            - name: SNMP_NOTIF_PRIMARY_TARGET_WITH_PORT\n              value: {{ $envAll.Values.conf.nagios.notification.snmp.primary_target }}\n            - name: SNMP_NOTIF_SECONDARY_TARGET_WITH_PORT\n              value: {{ $envAll.Values.conf.nagios.notification.snmp.secondary_target }}\n            - name: REST_NOTIF_PRIMARY_TARGET_URL\n              value: {{ $envAll.Values.conf.nagios.notification.http.primary_target }}\n            - name: REST_NOTIF_SECONDARY_TARGET_URL\n              value: {{ $envAll.Values.conf.nagios.notification.http.secondary_target }}\n            - name: CEPH_MGR_SERVICE\n              value: {{ tuple \"ceph_mgr\" \"internal\" \"metrics\" $envAll | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}/metrics\n            - name: PROMETHEUS_SERVICE\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $nagiosUserSecret }}\n                  key: PROMETHEUS_SERVICE\n            - name: ELASTICSEARCH_SERVICE\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $nagiosUserSecret }}\n                  key: ELASTICSEARCH_SERVICE\n            - name: NAGIOSADMIN_USER\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $nagiosUserSecret }}\n                  key: NAGIOSADMIN_USER\n            - name: NAGIOSADMIN_PASS\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $nagiosUserSecret }}\n                  key: NAGIOSADMIN_PASS\n{{- if .Values.manifests.certificates }}\n            - name: CA_CERT_PATH\n              value: \"/etc/ssl/certs/ca.crt\"\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nagios-confd\n              
mountPath: /opt/nagios/etc/conf.d\n            - name: nagios-etc\n              mountPath: /opt/nagios/etc/nagios.cfg\n              subPath: nagios.cfg\n              readOnly: true\n            - name: nagios-etc\n              mountPath: /opt/nagios/etc/cgi.cfg\n              subPath: cgi.cfg\n              readOnly: true\n            {{- $objectKeys := keys $envAll.Values.conf.nagios.objects -}}\n            {{- range $objectType := $objectKeys }}\n            - name: nagios-etc\n              mountPath: /opt/nagios/etc/{{$objectType}}.cfg\n              subPath: {{$objectType}}.cfg\n              readOnly: true\n            {{- end }}\n            - name: nagios-bin\n              mountPath: /tmp/nagios-readiness.sh\n              subPath: nagios-readiness.sh\n              readOnly: true\n{{- if not (empty .Values.conf.nagios.query_es_clauses) }}\n            - name: nagios-etc\n              mountPath: /opt/nagios/etc/objects/query_es_clauses.json\n              subPath: query_es_clauses.json\n              readOnly: true\n{{- end }}\n            - name: pod-var-log\n              mountPath: /opt/nagios/var/log\n{{- if not (empty .Values.conf.nagios.additionalPlugins) }}\n{{-   range .Values.conf.nagios.additionalPlugins }}\n            - name: additional-plugins\n              mountPath: /usr/lib/nagios/plugins/{{ .name }}\n              subPath: {{ .name }}\n              readOnly: true\n{{-   end }}\n{{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" $envAll.Values.endpoints.monitoring.auth.admin.secret.tls.internal \"path\" \"/etc/ssl/certs\" \"certs\" (tuple \"ca.crt\") | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: pod-var-log\n          emptyDir: {}\n        - name: nagios-confd\n          emptyDir: {}\n        - name: nagios-etc\n          secret:\n            secretName: nagios-etc\n            defaultMode: 0444\n        - 
name: nagios-bin\n          configMap:\n            name: nagios-bin\n            defaultMode: 0555\n        - name: additional-plugins\n          secret:\n            secretName: nagios-additional-plugins\n            defaultMode: 0755\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" $envAll.Values.endpoints.monitoring.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "nagios/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "nagios/templates/ingress-nagios.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress .Values.network.nagios.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendService\" \"nagios\" \"backendServiceType\" \"nagios\" \"backendPort\" \"http\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "nagios/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"nagios\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "nagios/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"nagios\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "nagios/templates/pod-helm-tests.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pod_helm_test }}\n{{- $envAll := . }}\n\n{{- $nagiosUserSecret := .Values.secrets.nagios.admin }}\n\n{{- $serviceAccountName := print .Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{.Release.Name}}-test\"\n  labels:\n{{ tuple $envAll \"nagios\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"nagios-test\" \"containerNames\" (list \"init\" \"nagios-helm-tests\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n{{ dict \"envAll\" $envAll \"application\" \"monitoring\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  restartPolicy: Never\n  initContainers:\n{{ tuple $envAll \"tests\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: nagios-helm-tests\n{{ tuple $envAll \"selenium_tests\" | include 
\"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"monitoring\" \"container\" \"helm_tests\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      command:\n        - /tmp/selenium-tests.py\n      env:\n        - name: NAGIOS_USER\n          valueFrom:\n            secretKeyRef:\n              name: {{ $nagiosUserSecret }}\n              key: NAGIOSADMIN_USER\n        - name: NAGIOS_PASSWORD\n          valueFrom:\n            secretKeyRef:\n              name: {{ $nagiosUserSecret }}\n              key: NAGIOSADMIN_PASS\n        - name: NAGIOS_URI\n          value: {{ tuple \"nagios\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n        - name: CHROME_CONFIG_HOME\n          value: /tmp/google-chrome\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: artifacts\n          mountPath: /tmp/artifacts\n        - name: nagios-bin\n          mountPath: /tmp/selenium-tests.py\n          subPath: selenium-tests.py\n          readOnly: true\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: artifacts\n      emptyDir: {}\n    - name: nagios-bin\n      configMap:\n        name: nagios-bin\n        defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "nagios/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"nagios\" \"backendService\" \"nagios\" ) }}\n{{- end }}\n"
  },
  {
    "path": "nagios/templates/secret-nagios.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_nagios }}\n{{- $envAll := . }}\n{{- $secretName := index $envAll.Values.secrets.nagios.admin }}\n{{- $prometheusService := tuple \"monitoring\" \"internal\" \"admin\" \"http\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n{{- $elasticsearchService := tuple \"elasticsearch\" \"internal\" \"admin\" \"http\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  NAGIOSADMIN_USER: {{ .Values.endpoints.nagios.auth.admin.username | b64enc }}\n  NAGIOSADMIN_PASS: {{ .Values.endpoints.nagios.auth.admin.password | b64enc }}\n  BIND_DN: {{ .Values.endpoints.ldap.auth.admin.bind | b64enc }}\n  BIND_PASSWORD: {{ .Values.endpoints.ldap.auth.admin.password | b64enc }}\n  PROMETHEUS_SERVICE: {{ $prometheusService | b64enc }}\n  ELASTICSEARCH_SERVICE: {{ $elasticsearchService | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "nagios/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "nagios/templates/service-ingress-nagios.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress .Values.network.nagios.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"nagios\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "nagios/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"nagios\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: http\n    port: {{ tuple \"nagios\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.nagios.node_port.enabled }}\n    nodePort: {{ .Values.network.nagios.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"nagios\" \"monitoring\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.nagios.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "nagios/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for nagios.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nimages:\n  tags:\n    apache_proxy: docker.io/library/httpd:2.4\n    nagios: quay.io/airshipit/nagios:latest-ubuntu_jammy\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    selenium_tests: quay.io/airshipit/osh-selenium:latest-ubuntu_noble\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\n# Use selenium v4 syntax\nselenium_v4: true\n\nlabels:\n  nagios:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\ndependencies:\n  dynamic:\n    common:\n      jobs:\n        - nagios-image-repo-sync\n      services:\n        - service: local_image_registry\n          endpoint: node\n  static:\n    image_repo_sync:\n      services:\n        - service: local_image_registry\n          endpoint: internal\n    nagios:\n      services: null\n    tests:\n      services:\n        - service: nagios\n          endpoint: internal\n\nsecrets:\n  nagios:\n    admin: nagios-admin-creds\n  oci_image_registry:\n    nagios: 
nagios-oci-image-registry-key\n  tls:\n    nagios:\n      nagios:\n        public: nagios-tls-public\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      nagios:\n        username: nagios\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  monitoring:\n    name: prometheus\n    auth:\n      admin:\n        username: admin\n        password: changeme\n        secret:\n          tls:\n            internal: prometheus-tls-api\n    hosts:\n      default: prom-metrics\n      public: prometheus\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      http:\n        default: 80\n  nagios:\n    name: nagios\n    namespace: null\n    auth:\n      admin:\n        username: nagiosadmin\n        password: password\n    hosts:\n      default: nagios-metrics\n      public: nagios\n    host_fqdn_override:\n      default: null\n      # NOTE(srwilkers): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      nagios:\n        default: 8000\n      http:\n        default: 80\n  ldap:\n    hosts:\n      default: ldap\n    auth:\n      admin:\n        bind: \"cn=admin,dc=cluster,dc=local\"\n        password: password\n    host_fqdn_override:\n      default: null\n    path:\n      
default: \"/ou=People,dc=cluster,dc=local\"\n    scheme:\n      default: ldap\n    port:\n      ldap:\n        default: 389\n  elasticsearch:\n    name: elasticsearch\n    namespace: null\n    auth:\n      admin:\n        username: admin\n        password: changeme\n    hosts:\n      default: elasticsearch-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: /\n    scheme:\n      default: http\n    port:\n      http:\n        default: 80\n  ceph_mgr:\n    namespace: null\n    hosts:\n      default: ceph-mgr\n    host_fqdn_override:\n      default: null\n    port:\n      mgr:\n        default: 7000\n      metrics:\n        default: 9283\n    scheme:\n      default: http\n\nnetwork:\n  nagios:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        nginx.ingress.kubernetes.io/affinity: cookie\n        nginx.ingress.kubernetes.io/session-cookie-name: kube-ingress-session-nagios\n        nginx.ingress.kubernetes.io/session-cookie-hash: sha1\n        nginx.ingress.kubernetes.io/session-cookie-expires: \"600\"\n        nginx.ingress.kubernetes.io/session-cookie-max-age: \"600\"\n        nginx.ingress.kubernetes.io/configuration-snippet: |\n          more_set_headers \"X-Content-Type-Options: 'nosniff'\";\n          more_set_headers \"X-Frame-Options: SAMEORIGIN\";\n          more_set_headers \"Content-Security-Policy: script-src 'self'\";\n          more_set_headers \"X-XSS-Protection: 1; mode=block\";\n        haproxy.org/path-rewrite: /\n        haproxy.org/cookie-persistence: \"kube-ingress-session-nagios\"\n        haproxy.org/response-set-header: |\n          X-Content-Type-Options nosniff\n          X-Frame-Options SAMEORIGIN\n          Content-Security-Policy \"script-src 'self'\"\n          X-XSS-Protection \"1; mode=block\"\n    node_port:\n      enabled: false\n      port: 
30925\n\nnetwork_policy:\n  nagios:\n    ingress:\n      - {}\n    egress:\n      - {}\n\npod:\n  security_context:\n    monitoring:\n      pod:\n        runAsUser: 0\n      container:\n        define_nagios_hosts:\n          readOnlyRootFilesystem: false\n        apache_proxy:\n          readOnlyRootFilesystem: false\n        nagios:\n          readOnlyRootFilesystem: false\n        helm_tests:\n          readOnlyRootFilesystem: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    termination_grace_period:\n      nagios:\n        timeout: 30\n  # env:\n  #\n  # NOTE(megheisler): This value can be used to hold\n  # the domain name. 
Functionality has been added in\n  # plugins to append the domain to the host name in\n  # the nagios dashboard\n  #\n  #  NODE_DOMAIN:\n  replicas:\n    nagios: 1\n  probes:\n    monitoring:\n      nagios:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 60\n            periodSeconds: 30\n            timeoutSeconds: 10\n      apache_proxy:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 20\n            periodSeconds: 10\n  resources:\n    enabled: false\n    nagios:\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n    apache_proxy:\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n    jobs:\n      image_repo_sync:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n      tests:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n\nmanifests:\n  certificates: false\n  configmap_additional_plugins: false\n  configmap_bin: true\n  configmap_etc: true\n  deployment: true\n  ingress: true\n  job_image_repo_sync: true\n  network_policy: false\n  pod_helm_test: true\n  secret_nagios: true\n  secret_ingress_tls: true\n  secret_registry: true\n  service: true\n  service_ingress: true\n\nconf:\n  httpd: |\n    ServerRoot \"/usr/local/apache2\"\n\n    Listen 80\n\n    LoadModule mpm_event_module modules/mod_mpm_event.so\n    LoadModule authn_file_module modules/mod_authn_file.so\n    LoadModule authn_core_module modules/mod_authn_core.so\n    LoadModule authz_host_module modules/mod_authz_host.so\n    LoadModule authz_groupfile_module modules/mod_authz_groupfile.so\n    LoadModule authz_user_module 
modules/mod_authz_user.so\n    LoadModule authz_core_module modules/mod_authz_core.so\n    LoadModule access_compat_module modules/mod_access_compat.so\n    LoadModule auth_basic_module modules/mod_auth_basic.so\n    LoadModule ldap_module modules/mod_ldap.so\n    LoadModule authnz_ldap_module modules/mod_authnz_ldap.so\n    LoadModule reqtimeout_module modules/mod_reqtimeout.so\n    LoadModule filter_module modules/mod_filter.so\n    LoadModule proxy_html_module modules/mod_proxy_html.so\n    LoadModule log_config_module modules/mod_log_config.so\n    LoadModule env_module modules/mod_env.so\n    LoadModule headers_module modules/mod_headers.so\n    LoadModule setenvif_module modules/mod_setenvif.so\n    LoadModule version_module modules/mod_version.so\n    LoadModule proxy_module modules/mod_proxy.so\n    LoadModule proxy_connect_module modules/mod_proxy_connect.so\n    LoadModule proxy_http_module modules/mod_proxy_http.so\n    LoadModule proxy_balancer_module modules/mod_proxy_balancer.so\n    LoadModule slotmem_shm_module modules/mod_slotmem_shm.so\n    LoadModule slotmem_plain_module modules/mod_slotmem_plain.so\n    LoadModule unixd_module modules/mod_unixd.so\n    LoadModule status_module modules/mod_status.so\n    LoadModule autoindex_module modules/mod_autoindex.so\n\n    <IfModule unixd_module>\n    User daemon\n    Group daemon\n    </IfModule>\n\n    <Directory />\n        AllowOverride none\n        Require all denied\n    </Directory>\n\n    <Files \".ht*\">\n        Require all denied\n    </Files>\n\n    ErrorLog /dev/stderr\n\n    LogLevel warn\n\n    <IfModule log_config_module>\n        LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" combined\n        LogFormat \"%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" proxy\n        LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b\" common\n\n        <IfModule logio_module>\n          LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b 
\\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\" %I %O\" combinedio\n        </IfModule>\n\n        SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n        CustomLog /dev/stdout common\n        CustomLog /dev/stdout combined\n        CustomLog /dev/stdout proxy env=forwarded\n    </IfModule>\n\n    <Directory \"/usr/local/apache2/cgi-bin\">\n        AllowOverride None\n        Options None\n        Require all granted\n    </Directory>\n\n    <IfModule headers_module>\n        RequestHeader unset Proxy early\n    </IfModule>\n\n    <IfModule proxy_html_module>\n    Include conf/extra/proxy-html.conf\n    </IfModule>\n\n    <VirtualHost *:80>\n      <Location />\n          ProxyPass http://localhost:{{ tuple \"nagios\" \"internal\" \"nagios\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/\n          ProxyPassReverse http://localhost:{{ tuple \"nagios\" \"internal\" \"nagios\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/\n      </Location>\n      <Proxy *>\n          AuthName \"Nagios\"\n          AuthType Basic\n          AuthBasicProvider file ldap\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}\n          AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}\n          AuthLDAPURL {{ tuple \"ldap\" \"default\" \"ldap\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | quote }}\n          Require valid-user\n      </Proxy>\n    </VirtualHost>\n  nagios:\n    notification:\n      snmp:\n        primary_target: 127.0.0.1:15162\n        secondary_target: 127.0.0.1:15162\n      http:\n        primary_target: 127.0.0.1:3904/events\n        secondary_target: 127.0.0.1:3904/events\n    objects:\n      base:\n        template: |\n          define host {\n            address 127.0.0.1\n            alias Prometheus Monitoring\n            check_command check-prometheus-host-alive\n            host_name {{ tuple \"monitoring\" \"public\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n            hostgroups prometheus-hosts\n            use linux-server\n          }\n\n          define contact {\n            alias notifying contact\n            contact_name notifying_contact\n            host_notification_options d,u,r,f,s\n            host_notification_period 24x7\n            name notifying_contact\n            register 0\n            service_notification_options w,u,c,r,f,s\n            service_notification_period 24x7\n          }\n\n          define contact {\n            alias snmp contact\n            contact_name snmp_notifying_contact\n            host_notification_commands send_host_snmp_trap\n            name snmp_notifying_contact\n            service_notification_commands send_service_snmp_trap\n            use notifying_contact\n          }\n\n          define contact {\n            alias HTTP contact\n            contact_name http_notifying_contact\n            host_notification_commands send_host_http_post\n            name http_notifying_contact\n            service_notification_commands send_service_http_post\n            use notifying_contact\n          }\n\n          define contactgroup {\n            alias SNMP and HTTP notifying group\n            contactgroup_name snmp_and_http_notifying_contact_group\n            members 
snmp_notifying_contact,http_notifying_contact\n          }\n\n          define hostgroup {\n            alias Prometheus Virtual Host\n            hostgroup_name prometheus-hosts\n          }\n\n          define hostgroup {\n            alias all\n            hostgroup_name all\n          }\n\n          define hostgroup {\n            alias base-os\n            hostgroup_name base-os\n          }\n\n          define command {\n            command_line $USER1$/send_service_trap.sh '$USER8$' '$HOSTNAME$' '$SERVICEDESC$' $SERVICESTATEID$ '$SERVICEOUTPUT$' '$USER4$' '$USER5$'\n            command_name send_service_snmp_trap\n          }\n\n          define command {\n            command_line $USER1$/send_host_trap.sh '$USER8$' '$HOSTNAME$' $HOSTSTATEID$ '$HOSTOUTPUT$' '$USER4$' '$USER5$'\n            command_name send_host_snmp_trap\n          }\n\n          define command {\n            command_line $USER1$/send_http_post_event.py --type service --hostname '$HOSTNAME$' --servicedesc '$SERVICEDESC$' --state_id $SERVICESTATEID$ --output '$SERVICEOUTPUT$' --monitoring_hostname '$HOSTNAME$' --primary_url '$USER6$' --secondary_url '$USER7$'\n            command_name send_service_http_post\n          }\n\n          define command {\n            command_line $USER1$/send_http_post_event.py --type host --hostname '$HOSTNAME$' --state_id $HOSTSTATEID$ --output '$HOSTOUTPUT$' --monitoring_hostname '$HOSTNAME$' --primary_url '$USER6$' --secondary_url '$USER7$'\n            command_name send_host_http_post\n          }\n\n          define command {\n            command_line $USER1$/check_rest_get_api.py --url $USER2$ --warning_response_seconds 5 --critical_response_seconds 10\n            command_name check-prometheus-host-alive\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname '$ARG1$' --labels_csv '$ARG2$' --msg_format '$ARG3$' --ok_message '$ARG4$'\n            command_name 
check_prom_alert_with_labels\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname '$ARG1$' --msg_format '$ARG2$' --ok_message '$ARG3$'\n            command_name check_prom_alert\n          }\n\n          define service {\n            check_interval 60\n            contact_groups snmp_and_http_notifying_contact_group\n            flap_detection_enabled 0\n            name notifying_service\n            notification_interval 120\n            process_perf_data 0\n            register 0\n            retry_interval 30\n            use generic-service\n          }\n      kubernetes:\n        template: |\n          define service {\n            check_command check_prom_alert!prom_exporter_calico_unavailable!CRITICAL- Calico exporter is not collecting metrics for alerting!OK- Calico exporter metrics are available.\n            hostgroup_name prometheus-hosts\n            service_description Prometheus-exporter_Calico\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!prom_exporter_kube_state_metrics_unavailable!CRITICAL- kube-state-metrics exporter is not collecting metrics for alerting!OK- kube-state-metrics exporter metrics are available.\n            hostgroup_name prometheus-hosts\n            service_description Prometheus-exporter_Kube-state-metrics\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!K8SNodesNotReady!CRITICAL- One or more nodes are not ready.\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Nodes_health\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert_with_labels!kube_statefulset_replicas_unavailable!statefulset=\"prometheus\"!statefulset {statefulset} has lesser than configured replicas\n            
check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Prometheus_replica-count\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert_with_labels!kube_statefulset_replicas_unavailable!statefulset=\"alertmanager\"!statefulset {statefulset} has lesser than configured replicas\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description PrometheusAlertmanager_replica-count\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!kube_statefulset_replicas_unavailable!CRITICAL- statefulset {statefulset} has lesser than configured replicas!OK- All statefulsets have configured amount of replicas\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Statefulset_replica-count\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!daemonsets_misscheduled!CRITICAL- Daemonset {daemonset} is incorrectly scheduled!OK- No daemonset misscheduling detected\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Daemonset_misscheduled\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!daemonsets_not_scheduled!CRITICAL- Daemonset {daemonset} is missing to be scheduled in some nodes!OK- All daemonset scheduling is as desired\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Daemonset_not-scheduled\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!daemonset_pods_unavailable!CRITICAL- Daemonset {daemonset} has pods unavailable!OK- All daemonset pods available\n            check_interval 60\n         
   hostgroup_name prometheus-hosts\n            service_description Daemonset_pods-unavailable\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!deployment_replicas_unavailable!CRITICAL- Deployment {deployment} has less than desired replicas!OK- All deployments have desired replicas\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Deployment_replicas-unavailable\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!volume_claim_capacity_high_utilization!CRITICAL- Volume claim {persistentvolumeclaim} has exceeded 80% utilization!OK- All volume claims less than 80% utilization\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Volume_claim_high_utilization\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!rollingupdate_deployment_replica_less_than_spec_max_unavailable!CRITICAL- Deployment {deployment} has less than desired replicas during a rolling update!OK- All deployments have desired replicas\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description RollingUpdate_Deployment-replicas-unavailable\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!job_status_failed!CRITICAL- Job {exported_job} has failed!OK- No Job failures\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Job_status-failed\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!pod_status_pending!CRITICAL- Pod {pod} in namespace {namespace} has been in pending status for more than 10 minutes!OK- No pods in pending status\n            
check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Pod_status-pending\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!pod_status_error_image_pull!CRITICAL- Pod {pod} in namespace {namespace} has been in error status of ErrImagePull for more than 10 minutes!OK- No pods in error status\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Pod_status-error-image-pull\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!pod_status_error_image_pull_backoff!CRITICAL- Pod {pod} in namespace {namespace} has been in error status of ImagePullBackOff for more than 10 minutes!OK- No pods in error status\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Pod_status-error-image-pull-backoff\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!
pod_error_config_error!CRITICAL- Pod {pod} in namespace {namespace} has been in error status of CreateContainerConfigError for more than 10 minutes!OK- No pods in error status\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Pod_status-error-config-error\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!pod_error_crash_loop_back_off!CRITICAL- Pod {pod} in namespace {namespace} has been in error status of CrashLoopBackOff for more than 10 minutes!OK- No pods in crashLoopBackOff status\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Pod_status-crashLoopBackOff\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!replicaset_missing_replicas!CRITICAL- Replicaset {replicaset} is missing replicas!OK- No replicas missing from replicaset\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Replicaset_missing-replicas\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!pod_container_terminated!CRITICAL- pod {pod} in namespace {namespace} has a container in terminated state!OK- pod container status looks good\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Pod_status-container-terminated\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert_with_labels!etcd_HighNumberOfFailedHTTPRequests!method=\"DELETE\"!CRITICAL- ETCD {instance} has a high HTTP DELETE operations failure!OK- ETCD at {instance} has low or no failures for HTTP DELETE\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description 
ETCD_high-http-delete-failures\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert_with_labels!etcd_HighNumberOfFailedHTTPRequests!method=~\"GET|QGET\"!CRITICAL- ETCD {instance} has a high HTTP GET operations failure!OK- ETCD at {instance} has low or no failures for HTTP GET\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description ETCD_high-http-get-failures\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert_with_labels!etcd_HighNumberOfFailedHTTPRequests!method=\"PUT\"!CRITICAL- ETCD {instance} has a high HTTP PUT operations failure!OK- ETCD at {instance} has low or no failures for HTTP PUT\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description ETCD_high-http-update-failures\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!calico_iptable_save_errors_high_1h!CRITICAL- Felix instance {instance} has seen high iptable save errors within the last hour!OK- iptables save errors are none or low\n            hostgroup_name prometheus-hosts\n            service_description Calico_iptables-save-errors\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!calico_ipset_errors_high_1h!CRITICAL- Felix instance {instance} has seen high ipset errors within the last hour!OK- ipset errors are none or low\n            hostgroup_name prometheus-hosts\n            service_description Calico_ipset-errors\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!calico_datapane_iface_msg_batch_size_high_5m!CRITICAL- Felix instance {instance} has seen a high value of dataplane interface message batch size!OK- dataplane interface message batch 
size are low\n            hostgroup_name prometheus-hosts\n            service_description Calico_interface-message-batch-size\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!calico_datapane_address_msg_batch_size_high_5m!CRITICAL- Felix instance {instance} has seen a high value of dataplane address message batch size!OK- dataplane address message batch size are low\n            hostgroup_name prometheus-hosts\n            service_description Calico_address-message-batch-size\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!calico_datapane_failures_high_1h!CRITICAL- Felix instance {instance} has seen high dataplane failures within the last hour!OK- datapane failures are none or low\n            hostgroup_name prometheus-hosts\n            service_description Calico_datapane_failures_high\n            use notifying_service\n          }\n      node:\n        template: |\n          define service {\n            check_command check_prom_alert!prom_exporter_node_unavailable!CRITICAL- Node exporter is not collecting metrics for alerting!OK- Node exporter metrics are available.\n            hostgroup_name prometheus-hosts\n            service_description Prometheus-exporter_Node\n            use generic-service\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_filesystem_full_in_4h' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- Mountpoint {mountpoint} will be full in four hours' --ok_message 'OK- All mountpoints usage rate is normal'\n            command_name check_filespace_mounts-usage-rate-fullin4hrs\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_filesystem_full_80percent' --labels_csv 
'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- Mountpoint {mountpoint} is more than 80 percent full' --ok_message 'OK- All mountpoints usage is normal'\n            command_name check_filespace_mounts-usage\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_load1_90percent' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- Node load average has been more than 90% for the past hour' --ok_message 'OK- Node load average is normal'\n            command_name check_node_loadavg\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_cpu_util_90percent' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- Node CPU utilization has been more than 90% for the past hour' --ok_message 'OK- Node cpu utilization is normal'\n            command_name check_node_cpu_util\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_network_conntrack_usage_80percent' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- Node network connections are more than 90% in use' --ok_message 'OK- Network connection utilization is normal'\n            command_name check_network_connections\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_high_memory_load' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- Node memory usage is more than 85%' --ok_message 'OK- Node memory usage is less than 85%'\n            command_name check_memory_usage\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_disk_write_latency' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' 
--msg_format 'CRITICAL- Disk write latency is high' --ok_message 'OK- Node disk write latency is normal'\n            command_name check_disk_write_latency\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_disk_read_latency' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- Disk read latency is high' --ok_message 'OK- Node disk read latency is normal'\n            command_name check_disk_read_latency\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_entropy_available_low' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- System has low entropy availability' --ok_message 'OK- System entropy availability is sufficient'\n            command_name check_entropy_availability\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_filedescriptors_full_in_3h' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- at current consumption rate no free file descriptors will be available in 3hrs.' --ok_message 'OK- System file descriptor consumption is ok.'\n            command_name check_filedescriptor_usage_rate\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_hwmon_high_cpu_temp' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- CPU temperature is 90 percent of critical temperature.' 
--ok_message 'OK- CPU temperatures are normal.'\n            command_name check_hwmon_high_cpu_temp\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_high_network_drop_rcv' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- Host system has an unusually high drop in network reception.' --ok_message 'OK- network packet receive drops not high.'\n            command_name check_network_receive_drop_high\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_high_network_drop_send' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- Host system has an unusually high drop in network transmission.' --ok_message 'OK- network packet transmit drops not high.'\n            command_name check_network_transmit_drop_high\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_high_network_errs_rcv' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- Host system has an unusually high error rate in network reception.' --ok_message 'OK- network reception errors not high.'\n            command_name check_network_receive_errors_high\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_high_network_errs_send' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- Host system has an unusually high error rate in network transmission.' 
--ok_message 'OK- network transmission errors not high.'\n            command_name check_network_transmit_errors_high\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_vmstat_paging_rate_high' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- Memory paging rate over 5 minutes is high.' --ok_message 'OK- Memory paging rate over 5 minutes is ok.'\n            command_name check_vmstat_paging_rate\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_xfs_block_allocation_high' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- XFS block allocation is more than 80 percent of available.' --ok_message 'OK- XFS block allocation is less than 80 percent of available.'\n            command_name check_xfs_block_allocation\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_network_bond_slaves_down' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- {master} is missing slave interfaces.' --ok_message 'OK- Network bonds have slave interfaces functional.'\n            command_name check_network_bond_status\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_numa_memory_used' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- NUMA memory usage is more than 80 percent of available.' 
--ok_message 'OK- NUMA memory usage is normal.'\n            command_name check_numa_memory_usage\n          }\n\n          define command {\n            command_line $USER1$/query_prometheus_alerts.py --prometheus_api $USER2$ --alertname 'node_ntp_clock_skew_high' --labels_csv 'instance=~\"$HOSTADDRESS$.*\"' --msg_format 'CRITICAL- NTP clock skew is more than 2 seconds.' --ok_message 'OK- NTP clock skew is less than 2 seconds.'\n            command_name check_ntp_sync\n          }\n\n          define service {\n            check_command check_filespace_mounts-usage-rate-fullin4hrs\n            check_interval 60\n            hostgroup_name base-os\n            service_description Filespace_mounts-usage-rate-fullin4hrs\n            use notifying_service\n          }\n\n          define service {\n            check_command check_filespace_mounts-usage\n            check_interval 60\n            hostgroup_name base-os\n            service_description Filespace_mounts-usage\n            use notifying_service\n          }\n\n          define service {\n            check_command check_node_loadavg\n            hostgroup_name base-os\n            service_description CPU_Load-average\n            use notifying_service\n          }\n\n          define service {\n            check_command check_node_cpu_util\n            hostgroup_name base-os\n            service_description CPU_utilization\n            use notifying_service\n          }\n\n          define service {\n            check_command check_network_connections\n            hostgroup_name base-os\n            service_description Network_connections\n            use notifying_service\n          }\n\n          define service {\n            check_command check_memory_usage\n            hostgroup_name base-os\n            service_description Memory_usage\n            use notifying_service\n          }\n\n          define service {\n            check_command check_disk_write_latency\n            hostgroup_name base-os\n  
          service_description Disk_write-latency\n            use notifying_service\n          }\n\n          define service {\n            check_command check_disk_read_latency\n            hostgroup_name base-os\n            service_description Disk_read-latency\n            use notifying_service\n          }\n\n          define service {\n            check_command check_entropy_availability\n            hostgroup_name base-os\n            service_description Entropy_availability\n            use notifying_service\n          }\n\n          define service {\n            check_command check_filedescriptor_usage_rate\n            hostgroup_name base-os\n            service_description FileDescriptors_usage-rate-high\n            use notifying_service\n          }\n\n          define service {\n            check_command check_hwmon_high_cpu_temp\n            hostgroup_name base-os\n            service_description HW_cpu-temp-high\n            use notifying_service\n          }\n\n          define service {\n            check_command check_network_receive_drop_high\n            hostgroup_name base-os\n            service_description Network_receive-drop-high\n            use notifying_service\n          }\n\n          define service {\n            check_command check_network_transmit_drop_high\n            hostgroup_name base-os\n            service_description Network_transmit-drop-high\n            use notifying_service\n          }\n\n          define service {\n            check_command check_network_receive_errors_high\n            hostgroup_name base-os\n            service_description Network_receive-errors-high\n            use notifying_service\n          }\n\n          define service {\n            check_command check_network_transmit_errors_high\n            hostgroup_name base-os\n            service_description Network_transmit-errors-high\n            use notifying_service\n          }\n\n          define service {\n            check_command 
check_vmstat_paging_rate\n            hostgroup_name base-os\n            service_description Memory_vmstat-paging-rate\n            use notifying_service\n          }\n\n          define service {\n            check_command check_xfs_block_allocation\n            hostgroup_name base-os\n            service_description XFS_block-allocation\n            use notifying_service\n          }\n\n          define service {\n            check_command check_network_bond_status\n            hostgroup_name base-os\n            service_description Network_bondstatus\n            use notifying_service\n          }\n\n          define service {\n            check_command check_numa_memory_usage\n            hostgroup_name base-os\n            service_description Memory_NUMA-usage\n            use notifying_service\n          }\n\n          define service {\n            check_command check_ntp_sync\n            hostgroup_name base-os\n            service_description NTP_sync\n            use notifying_service\n          }\n      ceph:\n        template: |\n          define service {\n            check_command check_prom_alert!prom_exporter_ceph_unavailable!CRITICAL- CEPH exporter is not collecting metrics for alerting!OK- CEPH exporter metrics are available.\n            hostgroup_name prometheus-hosts\n            service_description Prometheus-exporter_CEPH\n            use generic-service\n          }\n\n          define command {\n            command_line $USER1$/check_exporter_health_metric.py --exporter_api $USER10$ --health_metric ceph_health_status --critical 2 --warning 1\n            command_name check_ceph_health\n          }\n\n          define service {\n            check_command check_ceph_health\n            check_interval 300\n            hostgroup_name base-os\n            service_description CEPH_health\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!ceph_monitor_quorum_low!CRITICAL- ceph 
monitor quorum does not exist!OK- ceph monitor quorum exists\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description CEPH_quorum\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!ceph_monitor_quorum_absent!CRITICAL- ceph monitor quorum does not exist!OK- ceph monitor quorum exists\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description CEPH_quorum\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!ceph_cluster_usage_high!CRITICAL- ceph cluster storage is more than 80 percent!OK- ceph storage is less than 80 percent\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description CEPH_storage-usage\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!ceph_placement_group_degrade_pct_high!CRITICAL- ceph cluster PGs down are more than 80 percent!OK- ceph PG degradation is less than 80 percent\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description CEPH_PGs-degradation\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!ceph_osd_down!CRITICAL- One or more CEPH OSDs are down for more than 5 minutes!OK- All the CEPH OSDs are up\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description CEPH_OSDs-down\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert_with_labels!node_ntp_clock_skew_high!ceph-mon=\"enabled\"!CRITICAL- CEPH clock skew is more than 2 seconds!OK- CEPH clock skew is less than 2 seconds\n            check_interval 60\n            hostgroup_name prometheus-hosts\n    
        service_description CEPH_Clock-skew\n            use notifying_service\n          }\n    nagios:\n      template: |\n        accept_passive_host_checks=1\n        accept_passive_service_checks=1\n        additional_freshness_latency=15\n        allow_empty_hostgroup_assignment=1\n        auto_reschedule_checks=0\n        auto_rescheduling_interval=30\n        auto_rescheduling_window=180\n        bare_update_check=0\n        cached_host_check_horizon=15\n        cached_service_check_horizon=15\n        {{- $objectKeys := keys .Values.conf.nagios.objects -}}\n        {{- range $object := $objectKeys }}\n        cfg_file=/opt/nagios/etc/{{$object}}.cfg\n        {{- end }}\n        cfg_file=/opt/nagios/etc/objects/commands.cfg\n        cfg_file=/opt/nagios/etc/objects/contacts.cfg\n        cfg_file=/opt/nagios/etc/objects/timeperiods.cfg\n        cfg_file=/opt/nagios/etc/objects/templates.cfg\n        cfg_file=/opt/nagios/etc/conf.d/nagios-hosts.cfg\n\n        check_external_commands=1\n        check_for_orphaned_hosts=1\n        check_for_orphaned_services=1\n        check_for_updates=1\n        check_host_freshness=0\n        check_result_path=/opt/nagios/var/spool/checkresults\n        check_result_reaper_frequency=10\n        check_service_freshness=1\n        check_workers=4\n        command_file=/opt/nagios/var/rw/nagios.cmd\n        daemon_dumps_core=0\n        date_format=us\n        debug_file=/opt/nagios/var/nagios.debug\n        debug_level=0\n        debug_verbosity=1\n        enable_environment_macros=0\n        enable_event_handlers=1\n        enable_flap_detection=1\n        enable_notifications=1\n        enable_predictive_host_dependency_checks=1\n        enable_predictive_service_dependency_checks=1\n        event_broker_options=-1\n        event_handler_timeout=60\n        execute_host_checks=1\n        execute_service_checks=1\n        high_host_flap_threshold=20\n        high_service_flap_threshold=20\n        host_check_timeout=60\n       
 host_freshness_check_interval=60\n        host_inter_check_delay_method=s\n        illegal_macro_output_chars=`~$&|'<>\"\n        interval_length=1\n        lock_file=/var/run/nagios.lock\n        log_archive_path=/opt/nagios/var/log/archives\n        log_current_states=1\n        log_event_handlers=1\n        log_external_commands=1\n        log_file=/opt/nagios/var/log/nagios.log\n        log_host_retries=1\n        log_initial_states=0\n        log_notifications=0\n        log_passive_checks=1\n        log_rotation_method=d\n        log_service_retries=1\n        low_host_flap_threshold=5\n        low_service_flap_threshold=5\n        max_check_result_file_age=3600\n        max_check_result_reaper_time=30\n        max_concurrent_checks=10\n        max_debug_file_size=1e+06\n        max_host_check_spread=30\n        max_service_check_spread=30\n        nagios_group=nagios\n        nagios_user=nagios\n        notification_timeout=60\n        object_cache_file=/opt/nagios/var/objects.cache\n        obsess_over_hosts=0\n        obsess_over_services=0\n        ocsp_timeout=5\n        passive_host_checks_are_soft=0\n        perfdata_timeout=5\n        precached_object_file=/opt/nagios/var/objects.precache\n        process_performance_data=0\n        resource_file=/opt/nagios/etc/resource.cfg\n        retain_state_information=1\n        retained_contact_host_attribute_mask=0\n        retained_contact_service_attribute_mask=0\n        retained_host_attribute_mask=0\n        retained_process_host_attribute_mask=0\n        retained_process_service_attribute_mask=0\n        retained_service_attribute_mask=0\n        retention_update_interval=60\n        service_check_timeout=60\n        service_freshness_check_interval=60\n        service_inter_check_delay_method=s\n        service_interleave_factor=s\n        soft_state_dependencies=0\n        state_retention_file=/opt/nagios/var/retention.dat\n        status_file=/opt/nagios/var/status.dat\n        
status_update_interval=10\n        temp_file=/opt/nagios/var/nagios.tmp\n        temp_path=/tmp\n        translate_passive_host_checks=0\n        use_aggressive_host_checking=0\n        use_large_installation_tweaks=0\n        use_regexp_matching=1\n        use_retained_program_state=1\n        use_retained_scheduling_info=1\n        use_syslog=0\n        use_true_regexp_matching=0\n    cgi:\n      template: |\n        action_url_target=_blank\n        authorized_for_all_host_commands=*\n        authorized_for_all_hosts=*\n        authorized_for_all_service_commands=*\n        authorized_for_all_services=*\n        authorized_for_configuration_information=*\n        authorized_for_system_commands=nagiosadmin\n        authorized_for_system_information=*\n        default_statuswrl_layout=4\n        enable_page_tour=0\n        escape_html_tags=1\n        lock_author_names=1\n        main_config_file=/opt/nagios/etc/nagios.cfg\n        navbar_search_for_addresses=1\n        navbar_search_for_aliases=1\n        notes_url_target=_blank\n        physical_html_path=/opt/nagios/share\n        ping_syntax=/bin/ping -n -U -c 5 $HOSTADDRESS$\n        refresh_rate=90\n        result_limit=100\n        show_context_help=0\n        url_html_path=/nagios\n        use_authentication=0\n        use_pending_states=1\n        use_ssl_authentication=0\n    query_es_clauses: null\n    additionalPlugins: []\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 
objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "namespace-config/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Namespace Config\nname: namespace-config\nversion: 2025.2.0\nhome: https://kubernetes.io/docs/concepts/policy/limit-range/\n...\n"
  },
  {
    "path": "namespace-config/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "namespace-config/templates/limit-range.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\napiVersion: v1\nkind: LimitRange\nmetadata:\n  name: {{ printf \"%s-%s\" .Release.Name \"limit-range\"  }}\nspec:\n{{ toYaml (dict \"limits\" .Values.limits) | indent 2 }}\n"
  },
  {
    "path": "namespace-config/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for memcached.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nlimits:\n  - type: Container\n    default:\n      cpu: 8\n      memory: 8192Mi\n    defaultRequest:\n      cpu: 0.1\n      memory: 64Mi\n\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "neutron/.helmignore",
    "content": "values_overrides\n"
  },
  {
    "path": "neutron/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Neutron\nname: neutron\nversion: 2025.2.0\nhome: https://docs.openstack.org/neutron/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Neutron/OpenStack_Project_Neutron_vertical.png\nsources:\n  - https://opendev.org/openstack/neutron\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "neutron/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "neutron/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nneutron-db-manage \\\n  --config-file /etc/neutron/neutron.conf \\\n  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \\\n  --config-dir /etc/neutron/neutron.conf.d \\\n  upgrade head\n\n{{- if .Values.conf.plugins.taas.taas.enabled }}\nneutron-db-manage \\\n  --config-file /etc/neutron/neutron.conf \\\n  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \\\n  --config-dir /etc/neutron/neutron.conf.d \\\n  --subproject tap-as-a-service \\\n  upgrade head\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/bin/_health-probe.py.tpl",
    "content": "#!/usr/bin/env python\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n\"\"\"\nHealth probe script for OpenStack agents that uses RPC/unix domain socket for\ncommunication. Sends message to agent through rpc call method and expects a\nreply. It is expected to receive a failure from the agent's RPC server as the\nmethod does not exist.\n\nScript returns failure to Kubernetes only when\n  a. agent is not reachable or\n  b. agent times out sending a reply.\n\nsys.stderr.write() writes to pod's events on failures.\n\nUsage example for Neutron L3 agent:\n# python health-probe.py --config-file /etc/neutron/neutron.conf \\\n#  --config-file /etc/neutron/l3_agent.ini --agent-queue-name l3_agent\n\nUsage example for Neutron metadata agent:\n# python health-probe.py --config-file /etc/neutron/neutron.conf \\\n#  --config-file /etc/neutron/metadata_agent.ini\n\"\"\"\n\nimport httplib2\nfrom http import client as httplib\nimport json\nimport os\nimport psutil\nimport signal\nimport socket\nimport sys\n\nfrom oslo_config import cfg\nfrom oslo_context import context\nfrom oslo_log import log\nimport oslo_messaging\n\nrpc_timeout = int(os.getenv('RPC_PROBE_TIMEOUT', '60'))\nrpc_retries = int(os.getenv('RPC_PROBE_RETRIES', '2'))\nrabbit_port = 5672\ntcp_established = \"ESTABLISHED\"\nlog.logging.basicConfig(level=log.{{ .Values.health_probe.logging.level }})\n\n\ndef _get_hostname(use_fqdn):\n    if use_fqdn:\n        return socket.getfqdn()\n    return 
socket.gethostname()\n\ndef check_agent_status(transport):\n    \"\"\"Verify agent status. Return success if agent consumes message\"\"\"\n    try:\n        use_fqdn = cfg.CONF.use_fqdn\n        target = oslo_messaging.Target(\n            topic=cfg.CONF.agent_queue_name,\n            server=_get_hostname(use_fqdn))\n        if hasattr(oslo_messaging, 'get_rpc_client'):\n            client = oslo_messaging.get_rpc_client(transport, target,\n                                                   timeout=rpc_timeout,\n                                                   retry=rpc_retries)\n        else:\n            client = oslo_messaging.RPCClient(transport, target,\n                                              timeout=rpc_timeout,\n                                              retry=rpc_retries)\n        client.call(context.RequestContext(),\n                    'pod_health_probe_method_ignore_errors')\n    except oslo_messaging.exceptions.MessageDeliveryFailure:\n        # Log to pod events\n        sys.stderr.write(\"Health probe unable to reach message bus\")\n        sys.exit(0)  # return success\n    except oslo_messaging.rpc.client.RemoteError as re:\n        message = getattr(re, \"message\", str(re))\n        if (\"Endpoint does not support RPC method\" in message) or \\\n                (\"Endpoint does not support RPC version\" in message):\n            sys.exit(0)  # Call reached the agent\n        else:\n            sys.stderr.write(\"Health probe unable to reach agent\")\n            sys.exit(1)  # return failure\n    except oslo_messaging.exceptions.MessagingTimeout:\n        sys.stderr.write(\"Health probe timed out. 
Agent is down or response \"\n                         \"timed out\")\n        sys.exit(1)  # return failure\n    except Exception as ex:\n        message = getattr(ex, \"message\", str(ex))\n        sys.stderr.write(\"Health probe caught exception sending message to \"\n                         \"agent: %s\" % message)\n        sys.exit(0)\n    except:\n        sys.stderr.write(\"Health probe caught exception sending message to\"\n                         \" agent\")\n        sys.exit(0)\n\n    finally:\n        if transport:\n            transport.cleanup()\n\n\ndef sriov_readiness_check():\n    \"\"\"Checks the sriov configuration on the sriov nic's\"\"\"\n    return_status = 1\n    with open('/etc/neutron/plugins/ml2/sriov_agent.ini') as nic:\n        for phy in nic:\n            if \"physical_device_mappings\" in phy:\n                phy_dev = phy.split('=', 1)[1]\n                phy_dev1 = phy_dev.rstrip().split(',')\n                if not phy_dev1:\n                    sys.stderr.write(\"No Physical devices\"\n                                     \" configured as SRIOV NICs\")\n                    sys.exit(1)\n                for intf in phy_dev1:\n                    phy, dev = intf.split(':')\n                    try:\n                        with open('/sys/class/net/%s/device/'\n                                  'sriov_numvfs' % dev) as f:\n                            for line in f:\n                                numvfs = line.rstrip('\\n')\n                                if numvfs:\n                                    return_status = 0\n                    except IOError:\n                        sys.stderr.write(\"IOError:No sriov_numvfs config file\")\n    sys.exit(return_status)\n\n\ndef get_rabbitmq_ports():\n    \"Get RabbitMQ ports\"\n\n    rabbitmq_ports = set()\n\n    try:\n        transport_url = oslo_messaging.TransportURL.parse(cfg.CONF)\n        for host in transport_url.hosts:\n            rabbitmq_ports.add(host.port)\n    except 
Exception as ex:\n        message = getattr(ex, \"message\", str(ex))\n        sys.stderr.write(\"Health probe caught exception reading \"\n                         \"RabbitMQ ports: %s\" % message)\n        sys.exit(0)  # return success\n\n    return rabbitmq_ports\n\n\ndef tcp_socket_state_check(agentq):\n    \"\"\"Check if the tcp socket to rabbitmq is in Established state\"\"\"\n    rabbit_sock_count = 0\n    if agentq == \"l3_agent\":\n        proc = \"neutron-l3-agen\"\n    elif agentq == \"dhcp_agent\":\n        proc = \"neutron-dhcp-ag\"\n    elif agentq == \"q-agent-notifier-tunnel-update\":\n        proc = \"neutron-openvsw\"\n    else:\n        proc = \"neutron-metadat\"\n\n    rabbitmq_ports = get_rabbitmq_ports()\n\n    for p in psutil.process_iter():\n        try:\n            with p.oneshot():\n                if proc in \" \".join(p.cmdline()):\n                    pcon = getattr(p, \"net_connections\", p.connections)()\n                    for con in pcon:\n                        try:\n                            port = con.raddr[1]\n                            status = con.status\n                        except IndexError:\n                            continue\n                        if port in rabbitmq_ports and\\\n                                status == tcp_established:\n                            rabbit_sock_count = rabbit_sock_count + 1\n        except psutil.Error:\n            continue\n\n    if rabbit_sock_count == 0:\n        sys.stderr.write(\"RabbitMQ sockets not Established\")\n        # Do not kill the pod if RabbitMQ is not reachable/down\n        if not cfg.CONF.liveness_probe:\n            sys.exit(1)\n\n\nclass UnixDomainHTTPConnection(httplib.HTTPConnection):\n    \"\"\"Connection class for HTTP over UNIX domain socket.\"\"\"\n\n    def __init__(self, host, port=None, strict=None, timeout=None,\n                 proxy_info=None):\n        httplib.HTTPConnection.__init__(self, host, port, strict)\n        self.timeout = 
timeout\n        self.socket_path = cfg.CONF.metadata_proxy_socket\n\n    def connect(self):\n        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n        if self.timeout:\n            self.sock.settimeout(self.timeout)\n        self.sock.connect(self.socket_path)\n\n\ndef test_socket_liveness():\n    \"\"\"Test if agent can respond to message over the socket\"\"\"\n    cfg.CONF.register_cli_opt(cfg.BoolOpt('liveness-probe', default=False,\n                                          required=False))\n    cfg.CONF.register_cli_opt(cfg.BoolOpt('use-fqdn', default=False,\n                                          required=False))\n    cfg.CONF(sys.argv[1:])\n\n    try:\n        metadata_proxy_socket = cfg.CONF.metadata_proxy_socket\n    except cfg.NoSuchOptError:\n        cfg.CONF.register_opt(cfg.StrOpt(\n            'metadata_proxy_socket',\n            default='/var/lib/neutron/openstack-helm/metadata_proxy'))\n\n    headers = {'X-Forwarded-For': '169.254.169.254',\n               'X-Neutron-Router-ID': 'pod-health-probe-check-ignore-errors'}\n\n    h = httplib2.Http(timeout=30)\n\n    try:\n        resp, content = h.request(\n            'http://169.254.169.254',\n            method='GET',\n            headers=headers,\n            connection_type=UnixDomainHTTPConnection)\n    except socket.error as se:\n        msg = \"Socket error: Health probe failed to connect to \" \\\n              \"Neutron Metadata agent: \"\n        if se.strerror:\n            sys.stderr.write(msg + se.strerror)\n        else:\n            sys.stderr.write(msg + getattr(se, \"message\"))\n        sys.exit(1)  # return failure\n    except Exception as ex:\n        message = getattr(ex, \"message\", str(ex))\n        sys.stderr.write(\"Health probe caught exception sending message to \"\n                         \"Neutron Metadata agent: %s\" % message)\n        sys.exit(0)  # return success\n\n    if resp.status >= 500:  # Probe expects HTTP error code 404\n        msg = 
\"Health probe failed: Neutron Metadata agent failed to\" \\\n              \" process request: \"\n        sys.stderr.write(msg + str(resp.__dict__))\n        sys.exit(1)  # return failure\n\n\ndef test_rpc_liveness():\n    \"\"\"Test if agent can consume message from queue\"\"\"\n    oslo_messaging.set_transport_defaults(control_exchange='neutron')\n\n    rabbit_group = cfg.OptGroup(name='oslo_messaging_rabbit',\n                                title='RabbitMQ options')\n    cfg.CONF.register_group(rabbit_group)\n    cfg.CONF.register_cli_opt(cfg.StrOpt('agent-queue-name'))\n    cfg.CONF.register_cli_opt(cfg.BoolOpt('liveness-probe', default=False,\n                                          required=False))\n    cfg.CONF.register_cli_opt(cfg.BoolOpt('use-fqdn', default=False,\n                                          required=False))\n\n    cfg.CONF(sys.argv[1:])\n\n    try:\n        transport = oslo_messaging.get_rpc_transport(cfg.CONF)\n    except Exception as ex:\n        message = getattr(ex, \"message\", str(ex))\n        sys.stderr.write(\"Message bus driver load error: %s\" % message)\n        sys.exit(0)  # return success\n\n    if not cfg.CONF.transport_url or \\\n            not cfg.CONF.agent_queue_name:\n        sys.stderr.write(\"Both message bus URL and agent queue name are \"\n                         \"required for Health probe to work\")\n        sys.exit(0)  # return success\n\n    try:\n        cfg.CONF.set_override('rabbit_max_retries', 2,\n                              group=rabbit_group)  # 3 attempts\n    except cfg.NoSuchOptError as ex:\n        cfg.CONF.register_opt(cfg.IntOpt('rabbit_max_retries', default=2),\n                              group=rabbit_group)\n\n    agentq = cfg.CONF.agent_queue_name\n    tcp_socket_state_check(agentq)\n\n    check_agent_status(transport)\n\ndef check_pid_running(pid):\n    if psutil.pid_exists(int(pid)):\n       return True\n    else:\n       return False\n\nif __name__ == \"__main__\":\n\n    if 
\"liveness-probe\" in ','.join(sys.argv):\n        pidfile = \"/tmp/liveness.pid\"  #nosec\n    else:\n        pidfile = \"/tmp/readiness.pid\"  #nosec\n    data = {}\n    if os.path.isfile(pidfile):\n        with open(pidfile,'r') as f:\n            file_content = f.read().strip()\n            if file_content:\n                data = json.loads(file_content)\n\n    if 'pid' in data and check_pid_running(data['pid']):\n        if 'exit_count' in data and data['exit_count'] > 1:\n            # Third time in, kill the previous process\n            os.kill(int(data['pid']), signal.SIGTERM)\n        else:\n            data['exit_count'] = data.get('exit_count', 0) + 1\n            with open(pidfile, 'w') as f:\n                json.dump(data, f)\n            sys.exit(0)\n\n    data['pid'] = os.getpid()\n    data['exit_count'] = 0\n    with open(pidfile, 'w') as f:\n        json.dump(data, f)\n\n    if \"sriov_agent.ini\" in ','.join(sys.argv):\n        sriov_readiness_check()\n    elif \"metadata_agent.ini\" not in ','.join(sys.argv):\n        test_rpc_liveness()\n    else:\n        test_socket_liveness()\n\n    sys.exit(0)  # return success\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-bagpipe-bgp-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{- if (has \"openvswitch\" .Values.network.backend) }}\nchown neutron: /run/openvswitch/db.sock\n{{- end }}\n\n# handle any bridge mappings\nfor bmap in `sed 's/[{}\"]//g' /tmp/auto_bridge_add | tr \",\" \"\\n\"`; do\n  bridge=${bmap%:*}\n  iface=${bmap#*:}\n{{- if (has \"openvswitch\" .Values.network.backend) }}\n  ovs-vsctl --no-wait --may-exist add-br $bridge\n  if [ -n \"$iface\" -a \"$iface\" != \"null\" ]; then\n    ovs-vsctl --no-wait --may-exist add-port $bridge $iface\n    ip link set dev $iface up\n  fi\n{{- else if (has \"linuxbridge\" .Values.network.backend) }}\n  set +e; ip link add name $bridge type bridge; set -e\n  ip link set dev $bridge up\n  [ -n \"$iface\" -a \"$iface\" != \"null\" ] && ip link set dev $iface master $bridge\n{{- end }}\ndone\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-bagpipe-bgp.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\nexec bagpipe-bgp\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-bgp-dragent.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\nexec neutron-bgp-dragent \\\n      --config-file /etc/neutron/neutron.conf \\\n      --config-file /etc/neutron/bgp_dragent.ini \\\n      --config-dir /etc/neutron/neutron.conf.d \\\n      --debug\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-dhcp-agent-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\nmkdir -p /tmp/pod-shared\ntee > /tmp/pod-shared/neutron-agent.ini << EOF\n[DEFAULT]\nhost = $(hostname --fqdn)\nEOF\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-dhcp-agent.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\nexec neutron-dhcp-agent \\\n  --config-file /etc/neutron/neutron.conf \\\n{{- if ( has \"ovn\" .Values.network.backend ) }}\n  --config-file /tmp/pod-shared/ovn.ini \\\n{{- end }}\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\n  --config-file /tmp/pod-shared/neutron-agent.ini \\\n{{- end }}\n{{- if ( has \"openvswitch\" .Values.network.backend ) }}\n  --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini \\\n{{- end }}\n  --config-file /etc/neutron/dhcp_agent.ini \\\n  --config-dir /etc/neutron/neutron.conf.d\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-ironic-agent-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\nmkdir -p /tmp/pod-shared\ntee > /tmp/pod-shared/neutron-agent.ini << EOF\n[DEFAULT]\nhost = $(hostname --fqdn)\nEOF\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-ironic-agent.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec ironic-neutron-agent \\\n        --config-file /etc/neutron/neutron.conf \\\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\n  --config-file /tmp/pod-shared/neutron-agent.ini \\\n{{- end }}\n        --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \\\n        --config-dir /etc/neutron/neutron.conf.d\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-l2gw-agent.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\nexec neutron-l2gateway-agent \\\n      --config-file=/etc/neutron/neutron.conf \\\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\n  --config-file /tmp/pod-shared/neutron-agent.ini \\\n{{- end }}\n      --config-file=/etc/neutron/l2gw_agent.ini \\\n      --config-dir=/etc/neutron/neutron.conf.d\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-l3-agent-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\nmkdir -p /tmp/pod-shared\ntee > /tmp/pod-shared/neutron-agent.ini << EOF\n[DEFAULT]\nhost = $(hostname --fqdn)\nEOF\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-l3-agent.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\n\nexec neutron-l3-agent \\\n      --config-file /etc/neutron/neutron.conf \\\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\n  --config-file /tmp/pod-shared/neutron-agent.ini \\\n{{- end }}\n      --config-file /etc/neutron/l3_agent.ini \\\n      --config-dir /etc/neutron/neutron.conf.d\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-linuxbridge-agent-init-modules.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nchroot /mnt/host-rootfs modprobe bridge\nchroot /mnt/host-rootfs modprobe ip6_tables\nchroot /mnt/host-rootfs modprobe ebtables\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-linuxbridge-agent-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n# configure all bridge mappings defined in config\n# /tmp/auto_bridge_add is one line json file: {\"br-ex1\":\"eth1\",\"br-ex2\":\"eth2\"}\nfor bmap in `sed 's/[{}\"]//g' /tmp/auto_bridge_add | tr \",\" \"\\n\"`\ndo\n  bridge=${bmap%:*}\n  iface=${bmap#*:}\n  # adding existing bridge would break out the script when -e is set\n  set +e\n  ip link add name $bridge type bridge\n  set -e\n  ip link set dev $bridge up\n  if [ -n \"$iface\" ] && [ \"$iface\" != \"null\" ]\n  then\n    ip link set dev $iface  master $bridge\n  fi\ndone\n\ntunnel_interface=\"{{- .Values.network.interface.tunnel -}}\"\nif [ -z \"${tunnel_interface}\" ] ; then\n    # search for interface with tunnel network routing\n    tunnel_network_cidr=\"{{- .Values.network.interface.tunnel_network_cidr -}}\"\n    if [ -z \"${tunnel_network_cidr}\" ] ; then\n        tunnel_network_cidr=\"0/0\"\n    fi\n    # If there is not tunnel network gateway, exit\n    tunnel_interface=$(ip -4 route list ${tunnel_network_cidr} | awk -F 'dev' '{ print $2; exit }' \\\n        | awk '{ print $1 }') || exit 1\nfi\n\n# determine local-ip dynamically based on interface provided but only if tunnel_types is not null\nLOCAL_IP=$(ip a s $tunnel_interface | grep 'inet ' | awk '{print $2}' | awk -F \"/\" 'NR==1 {print $1}')\nif [ -z \"${LOCAL_IP}\" ] ; then\n  echo \"Var LOCAL_IP is empty\"\n  exit 1\nfi\n\ntee > /tmp/pod-shared/ml2-local-ip.ini 
<< EOF\n[vxlan]\nlocal_ip = \"${LOCAL_IP}\"\nEOF\n\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\nmkdir -p /tmp/pod-shared\ntee > /tmp/pod-shared/neutron-agent.ini << EOF\n[DEFAULT]\nhost = $(hostname --fqdn)\nEOF\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-linuxbridge-agent.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec neutron-linuxbridge-agent \\\n  --config-file /etc/neutron/neutron.conf \\\n  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \\\n  --config-file /tmp/pod-shared/ml2-local-ip.ini \\\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\n  --config-file /tmp/pod-shared/neutron-agent.ini \\\n{{- end }}\n  --config-file /etc/neutron/plugins/ml2/linuxbridge_agent.ini \\\n  --config-dir /etc/neutron/neutron.conf.d\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-metadata-agent-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nchown ${NEUTRON_USER_UID} /var/lib/neutron/openstack-helm\n{{- if (has \"ovn\" .Values.network.backend) }}\nchown ${NEUTRON_USER_UID} /run/openvswitch/db.sock\n{{- end }}\n\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\nmkdir -p /tmp/pod-shared\ntee > /tmp/pod-shared/neutron-agent.ini << EOF\n[DEFAULT]\nhost = $(hostname --fqdn)\nEOF\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-metadata-agent.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\nexec neutron-metadata-agent \\\n      --config-file /etc/neutron/neutron.conf \\\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\n  --config-file /tmp/pod-shared/neutron-agent.ini \\\n{{- end }}\n      --config-file /etc/neutron/metadata_agent.ini \\\n      --config-dir /etc/neutron/neutron.conf.d\n\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-netns-cleanup-cron.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -xe\n\n# Run \"neutron-netns-cleanup\" every 5 minutes\nwhile sleep 300; do\n    neutron-netns-cleanup \\\n        --config-file /etc/neutron/neutron.conf \\\n        --config-file /etc/neutron/dhcp_agent.ini \\\n        --config-file /etc/neutron/l3_agent.ini \\\n        --config-dir /etc/neutron/neutron.conf.d\ndone\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-openvswitch-agent-init-modules.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nchroot /mnt/host-rootfs modprobe ip6_tables\n\n{{- if .Values.conf.ovs_dpdk.enabled }}\nchroot /mnt/host-rootfs modprobe {{ .Values.conf.ovs_dpdk.driver | quote }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-openvswitch-agent-init-netoffload.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n# ASAP2\nfor cfg in $(cat /tmp/netoffload | jq -r '(.asap2 // [])[] | @base64'); do\n   _jq() {\n      echo ${cfg} | base64 --decode | jq -r ${1}\n   }\n\n   DEVICE=$(_jq '.dev')\n   VFS=$(_jq '.vfs')\n\n   offloadctl enable asap2 ${DEVICE} --vfs ${VFS}\ndone\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nOVS_SOCKET=/run/openvswitch/db.sock\nchown neutron: ${OVS_SOCKET}\n\n# This enables the usage of 'ovs-appctl' from neutron pod.\nOVS_PID=$(cat /run/openvswitch/ovs-vswitchd.pid)\nOVS_CTL=/run/openvswitch/ovs-vswitchd.${OVS_PID}.ctl\nchown neutron: ${OVS_CTL}\n\nfunction get_dpdk_config_value {\n  values=${@:1:$#-1}\n  filter=${!#}\n  value=$(echo ${values} | jq -r ${filter})\n  if [[ \"${value}\" == \"null\" ]]; then\n    echo \"\"\n  else\n    echo \"${value}\"\n  fi\n}\n\n\nDPDK_CONFIG_FILE=/tmp/dpdk.conf\nDPDK_CONFIG=\"\"\nDPDK_ENABLED=false\nif [ -f ${DPDK_CONFIG_FILE} ]; then\n  DPDK_CONFIG=$(cat ${DPDK_CONFIG_FILE})\n  if [[ $(get_dpdk_config_value ${DPDK_CONFIG} '.enabled') == \"true\" ]]; then\n    DPDK_ENABLED=true\n  fi\nfi\n\nfunction bind_nic {\n  echo $2 > /sys/bus/pci/devices/$1/driver_override\n  echo $1 > /sys/bus/pci/drivers/$2/bind\n}\n\nfunction unbind_nic {\n  echo $1 > /sys/bus/pci/drivers/$2/unbind\n  echo > /sys/bus/pci/devices/$1/driver_override\n}\n\nfunction get_name_by_pci_id {\n  path=$(find /sys/bus/pci/devices/$1/ -name net)\n  if [ -n \"${path}\" ] ; then\n    echo $(ls -1 $path/)\n  fi\n}\n\nfunction get_ip_address_from_interface {\n  local interface=$1\n  local ip=$(ip -4 -o addr s \"${interface}\" | awk '{ print $4; exit }' | awk -F '/' 'NR==1 {print $1}')\n  if [ -z \"${ip}\" ] ; then\n    exit 1\n  fi\n  echo ${ip}\n}\n\nfunction 
get_ip_prefix_from_interface {\n  local interface=$1\n  local prefix=$(ip -4 -o addr s \"${interface}\" | awk '{ print $4; exit }' | awk -F '/' 'NR==1 {print $2}')\n  if [ -z \"${prefix}\" ] ; then\n    exit 1\n  fi\n  echo ${prefix}\n}\n\nfunction migrate_ip {\n  pci_id=$1\n  bridge_name=$2\n\n  local src_nic=$(get_name_by_pci_id ${pci_id})\n  if [ -n \"${src_nic}\" ] ; then\n    bridge_exists=$(ip a s \"${bridge_name}\" | grep \"${bridge_name}\" | cut -f2 -d':' 2> /dev/null)\n    if [ -z \"${bridge_exists}\" ] ; then\n      echo \"Bridge \"${bridge_name}\" does not exist. Creating it on demand.\"\n      init_ovs_dpdk_bridge \"${bridge_name}\"\n    fi\n\n    migrate_ip_from_nic ${src_nic} ${bridge_name}\n  fi\n}\n\nfunction migrate_ip_from_nic {\n  src_nic=$1\n  bridge_name=$2\n\n  # Enabling explicit error handling: We must avoid to lose the IP\n  # address in the migration process. Hence, on every error, we\n  # attempt to assign the IP back to the original NIC and exit.\n  set +e\n\n  ip=$(get_ip_address_from_interface ${src_nic})\n  prefix=$(get_ip_prefix_from_interface ${src_nic})\n\n  bridge_ip=$(get_ip_address_from_interface \"${bridge_name}\")\n  bridge_prefix=$(get_ip_prefix_from_interface \"${bridge_name}\")\n\n  ip link set ${bridge_name} up\n\n  if [[ -n \"${ip}\" && -n \"${prefix}\" ]]; then\n    ip addr flush dev ${src_nic}\n    if [ $? -ne 0 ] ; then\n      ip addr replace ${ip}/${prefix} dev ${src_nic}\n      echo \"Error while flushing IP from ${src_nic}.\"\n      exit 1\n    fi\n\n    ip addr replace ${ip}/${prefix} dev \"${bridge_name}\"\n    if [ $? -ne 0 ] ; then\n      echo \"Error assigning IP to bridge \"${bridge_name}\".\"\n      ip addr replace ${ip}/${prefix} dev ${src_nic}\n      exit 1\n    fi\n  elif [[ -n \"${bridge_ip}\" && -n \"${bridge_prefix}\" ]]; then\n    echo \"Bridge '${bridge_name}' already has IP assigned. 
Keeping the same:: IP:[${bridge_ip}]; Prefix:[${bridge_prefix}]...\"\n  elif [[ -z \"${bridge_ip}\" && -z \"${ip}\" ]]; then\n    echo \"Interface and bridge have no ips configured. Leaving as is.\"\n  else\n    echo \"Interface ${src_nic} has invalid IP address. IP:[${ip}]; Prefix:[${prefix}]...\"\n    exit 1\n  fi\n\n  set -e\n}\n\nfunction get_pf_or_vf_pci {\n  dpdk_pci_id=${1}\n  vf_index=${2}\n\n  if [ -n \"$vf_index\" ]\n  then\n    iface=$(get_name_by_pci_id \"${dpdk_pci_id}\")\n    sysfs_numvfs_path=\"/sys/class/net/${iface}/device/sriov_numvfs\"\n    if [[ -f /sys/class/net/${iface}/device/sriov_numvfs &&\n          \"$(cat /sys/class/net/${iface}/device/sriov_numvfs)\" -ne \"0\" &&\n          -e /sys/class/net/${iface}/device/virtfn${vf_index} ]]\n    then\n      dpdk_pci_id=$(ls -la /sys/class/net/${iface}/device/virtfn${vf_index})\n      dpdk_pci_id=${dpdk_pci_id#*\"../\"}\n    else\n      echo \"Error fetching the VF PCI for PF: [\"${iface}\", \"${dpdk_pci_id}\"] and VF-Index: ${vf_index}.\"\n      exit 1\n    fi\n  fi\n}\n\nfunction bind_dpdk_nic {\n  target_driver=${1}\n  pci_id=${2}\n\n  current_driver=\"$(get_driver_by_address \"${pci_id}\" )\"\n  if [ \"$current_driver\" != \"$target_driver\" ]; then\n    if [ \"$current_driver\" != \"\" ]; then\n      unbind_nic \"${pci_id}\" ${current_driver}\n    fi\n    bind_nic \"${pci_id}\" ${target_driver}\n  fi\n}\n\nfunction ensure_vf_state {\n  iface=${1}\n  vf_string=${2}\n  check_string=${3}\n  expected=${4}\n\n  # wait for the vf really get the needed state\n  for i in 0 1 2 4 8 16 32; do\n    sleep ${i};\n    if [ \"$(ip link show ${iface} | grep \"${vf_string} \" | grep -Eo \"${check_string}\")\" == \"${expected}\" ]; then\n      break;\n    fi;\n  done\n}\n\nfunction process_dpdk_nics {\n  target_driver=$(get_dpdk_config_value ${DPDK_CONFIG} '.driver')\n  # loop over all nics\n  echo $DPDK_CONFIG | jq -r -c '.nics[]' | \\\n  while IFS= read -r nic; do\n    local port_name=$(get_dpdk_config_value 
${nic} '.name')\n    local pci_id=$(get_dpdk_config_value ${nic} '.pci_id')\n    local iface=$(get_dpdk_config_value ${nic} '.iface')\n    if [ -n \"${iface}\" ] && [ -z \"${pci_id}\" ]; then\n      local pci_id=$(get_address_by_nicname ${iface})\n    else\n      iface=$(get_name_by_pci_id \"${pci_id}\")\n    fi\n    local bridge=$(get_dpdk_config_value ${nic} '.bridge')\n    local vf_index=$(get_dpdk_config_value ${nic} '.vf_index')\n\n    if [[ $(get_dpdk_config_value ${nic} '.migrate_ip') == true ]] ; then\n      migrate_ip \"${pci_id}\" \"${bridge}\"\n    fi\n\n    if [ -n \"${iface}\" ]; then\n      ip link set ${iface} promisc on\n      if [ -n \"${vf_index}\" ]; then\n        vf_string=\"vf ${vf_index}\"\n        ip link set ${iface} ${vf_string} trust on\n        ensure_vf_state \"${iface}\" \"${vf_string}\" \"trust o(n|ff)\" \"trust on\"\n\n        # NOTE: To ensure proper toggle of spoofchk,\n        # turn it on then off.\n        ip link set ${iface} ${vf_string} spoofchk on\n        ensure_vf_state \"${iface}\" \"${vf_string}\" \"spoof checking o(n|ff)\" \"spoof checking on\"\n        ip link set ${iface} ${vf_string} spoofchk off\n        ensure_vf_state \"${iface}\" \"${vf_string}\" \"spoof checking o(n|ff)\" \"spoof checking off\"\n      fi\n    fi\n\n    # Fetch the PCI to be bound to DPDK driver.\n    # In case VF Index is configured then PCI of that particular VF\n    # is bound to DPDK, otherwise PF PCI is bound to DPDK.\n    get_pf_or_vf_pci \"${pci_id}\" \"${vf_index}\"\n\n    bind_dpdk_nic ${target_driver} \"${dpdk_pci_id}\"\n\n    dpdk_options=\"\"\n    ofport_request=$(get_dpdk_config_value ${nic} '.ofport_request')\n    if [ -n \"${ofport_request}\" ]; then\n      dpdk_options+=\"ofport_request=${ofport_request} \"\n    fi\n    n_rxq=$(get_dpdk_config_value ${nic} '.n_rxq')\n    if [ -n \"${n_rxq}\" ]; then\n      dpdk_options+=\"options:n_rxq=${n_rxq} \"\n    fi\n    n_txq=$(get_dpdk_config_value ${nic} '.n_txq')\n    if [ -n \"${n_txq}\" ]; then\n   
   dpdk_options+=\"options:n_txq=${n_txq} \"\n    fi\n    pmd_rxq_affinity=$(get_dpdk_config_value ${nic} '.pmd_rxq_affinity')\n    if [ -n \"${pmd_rxq_affinity}\" ]; then\n      dpdk_options+=\"other_config:pmd-rxq-affinity=${pmd_rxq_affinity} \"\n    fi\n    mtu=$(get_dpdk_config_value ${nic} '.mtu')\n    if [ -n \"${mtu}\" ]; then\n      dpdk_options+=\"mtu_request=${mtu} \"\n    fi\n    n_rxq_size=$(get_dpdk_config_value ${nic} '.n_rxq_size')\n    if [ -n \"${n_rxq_size}\" ]; then\n      dpdk_options+=\"options:n_rxq_desc=${n_rxq_size} \"\n    fi\n    n_txq_size=$(get_dpdk_config_value ${nic} '.n_txq_size')\n    if [ -n \"${n_txq_size}\" ]; then\n      dpdk_options+=\"options:n_txq_desc=${n_txq_size} \"\n    fi\n    vhost_iommu_support=$(get_dpdk_config_value ${nic} '.vhost-iommu-support')\n    if [ -n \"${vhost_iommu_support}\" ]; then\n      dpdk_options+=\"options:vhost-iommu-support=${vhost_iommu_support} \"\n    fi\n\n    ovs-vsctl --db=unix:${OVS_SOCKET} --may-exist add-port ${bridge} ${port_name} \\\n       -- set Interface ${port_name} type=dpdk options:dpdk-devargs=${dpdk_pci_id} ${dpdk_options}\n\n  done\n}\n\nfunction process_dpdk_bonds {\n  target_driver=$(get_dpdk_config_value ${DPDK_CONFIG} '.driver')\n  # loop over all bonds\n  echo $DPDK_CONFIG | jq -r -c '.bonds[]' > /tmp/bonds_array\n  while IFS= read -r bond; do\n    local bond_name=$(get_dpdk_config_value ${bond} '.name')\n    local dpdk_bridge=$(get_dpdk_config_value ${bond} '.bridge')\n    local migrate_ip=$(get_dpdk_config_value ${bond} '.migrate_ip')\n    local mtu=$(get_dpdk_config_value ${bond} '.mtu')\n    local n_rxq=$(get_dpdk_config_value ${bond} '.n_rxq')\n    local n_txq=$(get_dpdk_config_value ${bond} '.n_txq')\n    local ofport_request=$(get_dpdk_config_value ${bond} '.ofport_request')\n    local n_rxq_size=$(get_dpdk_config_value ${bond} '.n_rxq_size')\n    local n_txq_size=$(get_dpdk_config_value ${bond} '.n_txq_size')\n    local vhost_iommu_support=$(get_dpdk_config_value ${bond} 
'.vhost-iommu-support')\n    local ovs_options=$(get_dpdk_config_value ${bond} '.ovs_options')\n\n    local nic_name_str=\"\"\n    local dev_args_str=\"\"\n    local ip_migrated=false\n\n    echo $bond | jq -r -c '.nics[]' > /tmp/nics_array\n    while IFS= read -r nic; do\n      local pci_id=$(get_dpdk_config_value ${nic} '.pci_id')\n      local iface=$(get_dpdk_config_value ${nic} '.iface')\n      if [ -n \"${iface}\" ] && [ -z \"${pci_id}\" ]; then\n        local pci_id=$(get_address_by_nicname ${iface})\n      else\n        iface=$(get_name_by_pci_id \"${pci_id}\")\n      fi\n      local nic_name=$(get_dpdk_config_value ${nic} '.name')\n      local pmd_rxq_affinity=$(get_dpdk_config_value ${nic} '.pmd_rxq_affinity')\n      local vf_index=$(get_dpdk_config_value ${nic} '.vf_index')\n      local vf_string=\"\"\n\n      if [[ ${migrate_ip} = \"true\" && ${ip_migrated} = \"false\" ]]; then\n        migrate_ip \"${pci_id}\" \"${dpdk_bridge}\"\n        ip_migrated=true\n      fi\n\n      if [ -n \"${iface}\" ]; then\n        ip link set ${iface} promisc on\n        if [ -n \"${vf_index}\" ]; then\n          vf_string=\"vf ${vf_index}\"\n          ip link set ${iface} ${vf_string} trust on\n          ensure_vf_state \"${iface}\" \"${vf_string}\" \"trust o(n|ff)\" \"trust on\"\n\n          # NOTE: To ensure proper toggle of spoofchk,\n          # turn it on then off.\n          ip link set ${iface} ${vf_string} spoofchk on\n          ensure_vf_state \"${iface}\" \"${vf_string}\" \"spoof checking o(n|ff)\" \"spoof checking on\"\n          ip link set ${iface} ${vf_string} spoofchk off\n          ensure_vf_state \"${iface}\" \"${vf_string}\" \"spoof checking o(n|ff)\" \"spoof checking off\"\n        fi\n      fi\n\n      # Fetch the PCI to be bound to DPDK driver.\n      # In case VF Index is configured then PCI of that particular VF\n      # is bound to DPDK, otherwise PF PCI is bound to DPDK.\n      get_pf_or_vf_pci \"${pci_id}\" \"${vf_index}\"\n\n      bind_dpdk_nic 
${target_driver} \"${dpdk_pci_id}\"\n\n      nic_name_str+=\" \"${nic_name}\"\"\n      dev_args_str+=\" -- set Interface \"${nic_name}\" type=dpdk options:dpdk-devargs=\"\"${dpdk_pci_id}\"\n\n      if [[ -n ${mtu} ]]; then\n        dev_args_str+=\" -- set Interface \"${nic_name}\" mtu_request=${mtu}\"\n      fi\n\n      if [[ -n ${n_rxq} ]]; then\n        dev_args_str+=\" -- set Interface \"${nic_name}\" options:n_rxq=${n_rxq}\"\n      fi\n\n      if [[ -n ${n_txq} ]]; then\n        dev_args_str+=\" -- set Interface \"${nic_name}\" options:n_txq=${n_txq}\"\n      fi\n\n      if [[ -n ${ofport_request} ]]; then\n        dev_args_str+=\" -- set Interface \"${nic_name}\" ofport_request=${ofport_request}\"\n      fi\n\n      if [[ -n ${pmd_rxq_affinity} ]]; then\n        dev_args_str+=\" -- set Interface \"${nic_name}\" other_config:pmd-rxq-affinity=${pmd_rxq_affinity}\"\n      fi\n\n      if [[ -n ${n_rxq_size} ]]; then\n        dev_args_str+=\" -- set Interface \"${nic_name}\" options:n_rxq_desc=${n_rxq_size}\"\n      fi\n\n      if [[ -n ${n_txq_size} ]]; then\n        dev_args_str+=\" -- set Interface \"${nic_name}\" options:n_txq_desc=${n_txq_size}\"\n      fi\n\n      if [[ -n ${vhost_iommu_support} ]]; then\n        dev_args_str+=\" -- set Interface \"${nic_name}\" options:vhost-iommu-support=${vhost_iommu_support}\"\n      fi\n    done < /tmp/nics_array\n\n    if [ \"${UPDATE_DPDK_BOND_CONFIG}\" == \"true\" ]; then\n      echo -e \"NOTE: UPDATE_DPDK_BOND_CONFIG is set to true.\\\n      \\nThis might cause disruptions in ovs traffic.\\\n      \\nTo avoid this disruption set UPDATE_DPDK_BOND_CONFIG to false.\"\n      ovs-vsctl --db=unix:${OVS_SOCKET} set Bridge \"${dpdk_bridge}\" other_config:update_config=true\n      ovs_update_config=true\n    else\n      ovs_update_config=$(ovs-vsctl --columns=other_config --no-heading -d json list bridge \"${dpdk_bridge}\" \\\n        | jq -r '.[1][] as $list | if $list[0] == \"update_config\" then $list[1] else empty end')\n 
   fi\n\n\n    if [ \"${ovs_update_config}\" == \"true\" ] || [ \"${ovs_update_config}\" == \"\" ];\n    then\n      ovs-vsctl --db=unix:${OVS_SOCKET} --if-exists del-port \"${bond_name}\"\n      ovs-vsctl --db=unix:${OVS_SOCKET} set Bridge \"${dpdk_bridge}\" other_config:update_config=false\n      ovs-vsctl --db=unix:${OVS_SOCKET} --may-exist add-bond \"${dpdk_bridge}\" \"${bond_name}\" \\\n        ${nic_name_str} \\\n        ${ovs_options} ${dev_args_str}\n    fi\n\n  done < \"/tmp/bonds_array\"\n}\n\nfunction set_dpdk_module_log_level {\n  # loop over all target modules\n  if [ -n \"$(get_dpdk_config_value ${DPDK_CONFIG} '.modules')\" ]; then\n    echo $DPDK_CONFIG | jq -r -c '.modules[]' > /tmp/modules_array\n    while IFS= read -r module; do\n      local mod_name=$(get_dpdk_config_value ${module} '.name')\n      local mod_level=$(get_dpdk_config_value ${module} '.log_level')\n\n      ovs-appctl -t ${OVS_CTL} vlog/set ${mod_name}:${mod_level}\n      ovs-appctl -t ${OVS_CTL} vlog/list|grep ${mod_name}\n    done < /tmp/modules_array\n  fi\n}\n\nfunction get_driver_by_address {\n  if [[ -e /sys/bus/pci/devices/$1/driver ]]; then\n    echo $(ls /sys/bus/pci/devices/$1/driver -al | awk '{n=split($NF,a,\"/\"); print a[n]}')\n  fi\n}\n\nfunction get_address_by_nicname {\n  if [[ -e /sys/class/net/$1 ]]; then\n    local pci_address=$(readlink -f /sys/class/net/$1/device | xargs basename)\n    if [[ -e /sys/bus/pci/devices/${pci_address} ]]; then\n      echo ${pci_address}\n    else\n      echo \"PCI id for interface $1 cannot be found\" >&2\n    fi\n  else\n    echo \"Interface name $1 cannot be found\" >&2\n  fi\n}\n\nfunction init_ovs_dpdk_bridge {\n  bridge=$1\n  ovs-vsctl --db=unix:${OVS_SOCKET} --may-exist add-br ${bridge} \\\n  -- set Bridge ${bridge} datapath_type=netdev\n  ip link set ${bridge} up\n}\n\n# create all additional bridges defined in the DPDK section\nfunction init_ovs_dpdk_bridges {\n  for br in $(get_dpdk_config_value ${DPDK_CONFIG} 
'.bridges[].name'); do\n    init_ovs_dpdk_bridge ${br}\n  done\n}\n\n# handle any bridge mappings\n# /tmp/auto_bridge_add is one line json file: {\"br-ex1\":\"eth1\",\"br-ex2\":\"eth2\"}\nfor bmap in `sed 's/[{}\"]//g' /tmp/auto_bridge_add | tr \",\" \"\\n\"`\ndo\n  bridge=${bmap%:*}\n  iface=${bmap#*:}\n  if [[ \"${DPDK_ENABLED}\" == \"true\" ]]; then\n    ovs-vsctl --db=unix:${OVS_SOCKET} --may-exist add-br $bridge -- set bridge $bridge datapath_type=netdev\n  else\n    ovs-vsctl --db=unix:${OVS_SOCKET} --may-exist add-br $bridge\n  fi\n  if [ -n \"$iface\" ] && [ \"$iface\" != \"null\" ] && ( ip link show $iface 1>/dev/null 2>&1 );\n  then\n    ovs-vsctl --db=unix:${OVS_SOCKET} --may-exist add-port $bridge $iface\n    migrate_ip_from_nic $iface $bridge\n    if [[ \"${DPDK_ENABLED}\" != \"true\" ]]; then\n      ip link set dev $iface up\n    fi\n  fi\ndone\n\ntunnel_types=\"{{- .Values.conf.plugins.openvswitch_agent.agent.tunnel_types -}}\"\nif [[ -n \"${tunnel_types}\" ]] ; then\n    tunnel_interface=\"{{- .Values.network.interface.tunnel -}}\"\n    if [ -z \"${tunnel_interface}\" ] ; then\n        # search for interface with tunnel network routing\n        tunnel_network_cidr=\"{{- .Values.network.interface.tunnel_network_cidr -}}\"\n        if [ -z \"${tunnel_network_cidr}\" ] ; then\n            tunnel_network_cidr=\"0/0\"\n        fi\n        # If there is not tunnel network gateway, exit\n        tunnel_interface=$(ip -4 route list ${tunnel_network_cidr} | awk -F 'dev' '{ print $2; exit }' \\\n            | awk '{ print $1 }') || exit 1\n    fi\nfi\n\nif [[ \"${DPDK_ENABLED}\" == \"true\" ]]; then\n  init_ovs_dpdk_bridges\n  process_dpdk_nics\n  process_dpdk_bonds\n  set_dpdk_module_log_level\nfi\n\n# determine local-ip dynamically based on interface provided but only if tunnel_types is not null\nif [[ -n \"${tunnel_types}\" ]] ; then\n  LOCAL_IP=$(get_ip_address_from_interface ${tunnel_interface})\n  if [ -z \"${LOCAL_IP}\" ] ; then\n    echo \"Var 
LOCAL_IP is empty\"\n    exit 1\n  fi\n\ntee > /tmp/pod-shared/ml2-local-ip.ini << EOF\n[ovs]\nlocal_ip = \"${LOCAL_IP}\"\nEOF\n\n  if [[ \"${DPDK_ENABLED}\" == \"true\" ]]; then\n    PREFIX=$(get_ip_prefix_from_interface \"${tunnel_interface}\")\n\n    # loop over all nics\n    echo $DPDK_CONFIG | jq -r -c '.bridges[]' | \\\n    while IFS= read -r br; do\n      bridge_name=$(get_dpdk_config_value ${br} '.name')\n      tunnel_underlay_vlan=$(get_dpdk_config_value ${br} '.tunnel_underlay_vlan')\n\n      if [[ \"${bridge_name}\" == \"${tunnel_interface}\" ]]; then\n        # Route the tunnel traffic via the physical bridge\n        if [[ -n \"${LOCAL_IP}\" && -n \"${PREFIX}\" ]]; then\n          if [[ -n $(ovs-appctl -t ${OVS_CTL} ovs/route/show | grep \"${LOCAL_IP}\" | grep -v '^Cached:') ]]; then\n            ovs-appctl -t ${OVS_CTL} ovs/route/del \"${LOCAL_IP}\"/\"${PREFIX}\"\n          fi\n          ovs-appctl -t ${OVS_CTL} ovs/route/add \"${LOCAL_IP}\"/\"${PREFIX}\" \"${tunnel_interface}\"\n\n          if [[ -n \"${tunnel_underlay_vlan}\" ]]; then\n            # If there is not tunnel network gateway, exit\n            IFS=. read -r i1 i2 i3 i4 <<< \"${LOCAL_IP}\"\n            IFS=. 
read -r xx m1 m2 m3 m4 <<< $(for a in $(seq 1 32); do if [ $(((a - 1) % 8)) -eq 0 ]; then echo -n .; fi; if [ $a -le ${PREFIX} ]; then echo -n 1; else echo -n 0; fi; done)\n            tunnel_network_cidr=$(printf \"%d.%d.%d.%d\\n\" \"$((i1 & (2#$m1)))\" \"$((i2 & (2#$m2)))\" \"$((i3 & (2#$m3)))\" \"$((i4 & (2#$m4)))\") || exit 1\n            # Put a new flow to tag all the tunnel traffic with configured vlan-id\n            if [[ -n $(ovs-ofctl dump-flows \"${tunnel_interface}\" | grep \"nw_dst=${tunnel_network_cidr}\") ]]; then\n              ovs-ofctl del-flows \"${tunnel_interface}\" \"cookie=0x9999/-1, table=0, ip,nw_dst=${tunnel_network_cidr}\"\n            fi\n            ovs-ofctl add-flow \"${tunnel_interface}\" \"cookie=0x9999, table=0, priority=8, ip,nw_dst=${tunnel_network_cidr}, actions=mod_vlan_vid:${tunnel_underlay_vlan},NORMAL\"\n          fi\n        fi\n        break\n      fi\n    done\n  fi\nfi\n\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\nmkdir -p /tmp/pod-shared\ntee > /tmp/pod-shared/neutron-agent.ini << EOF\n[DEFAULT]\nhost = $(hostname --fqdn)\nEOF\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-openvswitch-agent-liveness.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -e\n\n/tmp/neutron-openvswitch-agent-readiness.sh\n\npython \\\n    /tmp/health-probe.py \\\n    --config-file \\\n    /etc/neutron/neutron.conf \\\n    --config-file \\\n    /etc/neutron/plugins/ml2/openvswitch_agent.ini \\\n    --agent-queue-name \\\n    q-agent-notifier-tunnel-update \\\n{{- if .Values.pod.use_fqdn.neutron_agent }}\n    --use-fqdn \\\n{{- end }}\n    --liveness-probe\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-openvswitch-agent-readiness.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -e\n\nOVS_PID=$(cat /run/openvswitch/ovs-vswitchd.pid)\nOVS_CTL=/run/openvswitch/ovs-vswitchd.${OVS_PID}.ctl\n\novs-vsctl list-br | grep -q br-int\n\n[ -z \"$(/usr/bin/ovs-vsctl show | grep error:)\" ]\n\n{{ if .Values.conf.ovs_dpdk.enabled }}\n  {{- if hasKey .Values.conf.ovs_dpdk \"nics\"}}\n    # Check if port(s) and bridge(s) are configured.\n    {{- range .Values.conf.ovs_dpdk.nics }}\n      ovs-vsctl list-br | grep -q {{ .bridge }}\n      ovs-vsctl list-ports {{ .bridge }} | grep -q {{ .name }}\n    {{- end }}\n  {{- end }}\n\n  {{- if hasKey .Values.conf.ovs_dpdk \"bonds\"}}\n    # Check if bond(s) and slave(s) are configured.\n    {{- range .Values.conf.ovs_dpdk.bonds }}\n      bond={{ .name }}\n      ovs-appctl -t ${OVS_CTL} bond/list | grep -q  ${bond}\n      {{- range .nics }}\n        ovs-appctl -t ${OVS_CTL} bond/show ${bond} | grep -q \"slave {{ .name }}\\|member {{ .name }}\"\n      {{- end }}\n    {{- end }}\n  {{- end }}\n{{ end }}\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-openvswitch-agent.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec neutron-openvswitch-agent \\\n  --config-file /etc/neutron/neutron.conf \\\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\n  --config-file /tmp/pod-shared/neutron-agent.ini \\\n{{- end }}\n{{- if .Values.conf.plugins.openvswitch_agent.agent.tunnel_types }}\n  --config-file /tmp/pod-shared/ml2-local-ip.ini \\\n{{- end }}\n{{- if .Values.conf.plugins.taas.taas.enabled }}\n  --config-file /etc/neutron/plugins/ml2/taas.ini \\\n{{- end }}\n  --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini \\\n  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \\\n  --config-dir /etc/neutron/neutron.conf.d\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-ovn-db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nneutron-ovn-db-sync-util \\\n        --config-file /etc/neutron/neutron.conf \\\n{{- if ( has \"ovn\" .Values.network.backend ) }}\n        --config-file /tmp/pod-shared/ovn.ini \\\n{{- end }}\n{{- if .Values.conf.plugins.taas.taas.enabled }}\n        --config-file /etc/neutron/taas_plugin.ini \\\n{{- end }}\n{{- if ( has \"sriov\" .Values.network.backend ) }}\n        --config-file /etc/neutron/plugins/ml2/sriov_agent.ini \\\n{{- end }}\n{{- if .Values.conf.plugins.l2gateway }}\n        --config-file /etc/neutron/l2gw_plugin.ini \\\n{{- end }}\n        --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \\\n        --config-dir /etc/neutron/neutron.conf.d \\\n        --ovn-neutron_sync_mode \"$1\"\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-ovn-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n# See: https://bugs.launchpad.net/neutron/+bug/2028442\nmkdir -p /tmp/pod-shared\ntee > /tmp/pod-shared/ovn.ini << EOF\n[ovn]\novn_nb_connection={{ coalesce .Values.conf.plugins.ml2_conf.ovn.ovn_nb_connection \"tcp:$OVN_OVSDB_NB_SERVICE_HOST:$OVN_OVSDB_NB_SERVICE_PORT_OVSDB\" }}\novn_sb_connection={{ coalesce .Values.conf.plugins.ml2_conf.ovn.ovn_sb_connection \"tcp:$OVN_OVSDB_SB_SERVICE_HOST:$OVN_OVSDB_SB_SERVICE_PORT_OVSDB\" }}\nEOF\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-ovn-metadata-agent.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\n\nexec neutron-ovn-metadata-agent \\\n      --config-file /etc/neutron/neutron.conf \\\n      --config-file /etc/neutron/ovn_metadata_agent.ini \\\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\n  --config-file /tmp/pod-shared/neutron-agent.ini \\\n{{- end }}\n      --config-file /tmp/pod-shared/ovn.ini \\\n      --config-dir /etc/neutron/neutron.conf.d\n\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-ovn-vpn-agent-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nchown ${NEUTRON_USER_UID} /var/lib/neutron/openstack-helm\n\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\nmkdir -p /tmp/pod-shared\ntee > /tmp/pod-shared/neutron-agent.ini << EOF\n[DEFAULT]\nhost = $(hostname --fqdn)\nEOF\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-ovn-vpn-agent.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\n\nexec neutron-ovn-vpn-agent \\\n      --config-file /etc/neutron/neutron.conf \\\n      --config-file /etc/neutron/neutron_vpnaas.conf \\\n      --config-file /etc/neutron/neutron_ovn_vpn_agent.ini \\\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\n  --config-file /tmp/pod-shared/neutron-agent.ini \\\n{{- end }}\n      --config-file /tmp/pod-shared/ovn.ini \\\n      --config-dir /etc/neutron/neutron.conf.d\n\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-rpc-server.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec neutron-rpc-server \\\n        --config-file /etc/neutron/neutron.conf \\\n{{- if ( has \"ovn\" .Values.network.backend ) }}\n        --config-file /tmp/pod-shared/ovn.ini \\\n{{- end }}\n{{- if .Values.conf.plugins.taas.taas.enabled }}\n        --config-file /etc/neutron/taas_plugin.ini \\\n{{- end }}\n{{- if ( has \"sriov\" .Values.network.backend ) }}\n        --config-file /etc/neutron/plugins/ml2/sriov_agent.ini \\\n{{- end }}\n{{- if .Values.conf.plugins.l2gateway }}\n        --config-file /etc/neutron/l2gw_plugin.ini \\\n{{- end }}\n        --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \\\n        --config-dir /etc/neutron/neutron.conf.d\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-server.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  confs=\"--config-file /etc/neutron/neutron.conf\"\n{{- if ( has \"ovn\" .Values.network.backend ) }}\n  confs+=\" --config-file /tmp/pod-shared/ovn.ini\"\n{{- end }}\n{{- if contains \"vpnaas\" .Values.conf.neutron.DEFAULT.service_plugins }}\n  confs+=\" --config-file /etc/neutron/neutron_vpnaas.conf\"\n{{- end }}\n{{- if contains \"ovn-vpnaas\" .Values.conf.neutron.DEFAULT.service_plugins }}\n  confs+=\" --config-file /etc/neutron/neutron_ovn_vpn_agent.ini\"\n{{- end }}\n{{- if .Values.conf.plugins.taas.taas.enabled }}\n  confs+=\" --config-file /etc/neutron/taas_plugin.ini\"\n{{- end }}\n{{- if ( has \"sriov\" .Values.network.backend ) }}\n  confs+=\" --config-file /etc/neutron/plugins/ml2/sriov_agent.ini\"\n{{- end }}\n{{- if .Values.conf.plugins.l2gateway }}\n  confs+=\" --config-file /etc/neutron/l2gw_plugin.ini\"\n{{- end }}\n  confs+=\" --config-file /etc/neutron/plugins/ml2/ml2_conf.ini\"\n  confs+=\" --config-dir /etc/neutron/neutron.conf.d\"\n\n  exec uwsgi --ini /etc/neutron/neutron-api-uwsgi.ini --pyargv \" $confs \"\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-sriov-agent-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n#NOTE: Please limit \"besteffort\" to dev env with mixed hardware computes only\n#      For prod env, the target nic should be there, if not, script should error out.\nset -ex\nBESTEFFORT=false\n{{- if ( has \"besteffort\" .Values.conf.sriov_init ) }}\nset +e\nBESTEFFORT=true\n{{- end }}\n\n{{- range $k, $sriov := .Values.network.interface.sriov }}\nif [ \"x{{ $sriov.num_vfs }}\" != \"x\" ]; then\n  echo \"{{ $sriov.num_vfs }}\" > /sys/class/net/{{ $sriov.device }}/device/sriov_numvfs\nelse\n  #NOTE(portdirect): Many NICs have difficulty creating more than n-1 over their\n  # claimed limit, by default err on the side of caution and account for this\n  # limitation.\n  TOT_NUM_VFS=$(cat /sys/class/net/{{ $sriov.device }}/device/sriov_totalvfs)\n  if [[ \"$TOT_NUM_VFS\" -le \"0\" ]]; then\n    NUM_VFS=\"$TOT_NUM_VFS\"\n  else\n    if [[ \"$((TOT_NUM_VFS - 1 ))\" -le \"1\" ]]; then\n      NUM_VFS=1\n    else\n      NUM_VFS=\"$((TOT_NUM_VFS - 1 ))\"\n    fi\n  fi\n  echo \"${NUM_VFS}\" > /sys/class/net/{{ $sriov.device }}/device/sriov_numvfs\nfi\n\n{{- if hasKey $sriov \"qos\" -}}\n{{- range $v, $qos := $sriov.qos }}\necho \"{{ $qos.share }}\" > /sys/class/net/{{ $sriov.device }}/device/sriov/{{ $qos.vf_num }}/qos/share\n{{- end}}\necho \"1\" > /sys/class/net/{{ $sriov.device }}/device/sriov/qos/apply\n{{- end }}\n\n# Set number of queues is best effort in case where VF is already binded,\n# NIC 
will not allow to set, in such case, a node reboot will allow all\n# VF to set properly.\n{{- if hasKey $sriov \"queues_per_vf\" }}\nset +e\n{{- range $v, $qvf := $sriov.queues_per_vf }}\nSMOKE=','\nMIRROR=' '\nSKIPLIST={{ $qvf.exclude_vf }}\nSKIPLIST=${SKIPLIST//$SMOKE/$MIRROR}\n\nNUMVF={{ $sriov.num_vfs }}\nfor vf in `seq 0 $[$NUMVF - 1]`\ndo\n  if ! ( echo ${SKIPLIST[@]} | grep -q -w \"$vf\" ); then\n    echo \"{{ $qvf.num_queues }}\" > /sys/class/net/{{ $sriov.device }}/device/sriov/$vf/num_queues\n  fi\ndone\n\n{{- end }}\nif ! $BESTEFFORT; then\n  set -e\nfi\n{{- end }}\n\n{{- if $sriov.mtu }}\nip link set dev {{ $sriov.device }} mtu {{ $sriov.mtu }}\n{{- end }}\nip link set {{ $sriov.device }} up\nip link show {{ $sriov.device }}\n\n{{- if $sriov.promisc }}\npromisc_mode=\"on\"\n{{- else }}\npromisc_mode=\"off\"\n{{- end }}\nip link set {{ $sriov.device }} promisc ${promisc_mode}\n#NOTE(portdirect): get the bus that the port is on\nNIC_BUS=$(lshw -c network -businfo | awk '/{{ $sriov.device }}/ {print $1}')\n#NOTE(portdirect): get first port on the nic\nNIC_FIRST_PORT=$(lshw -c network -businfo | awk \"/${NIC_BUS%%.*}/ { print \\$2; exit }\")\n#NOTE(portdirect): Enable promisc mode on the nic, by setting it for the 1st port\nethtool --set-priv-flags ${NIC_FIRST_PORT} vf-true-promisc-support ${promisc_mode}\n{{- end }}\n\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\nmkdir -p /tmp/pod-shared\ntee > /tmp/pod-shared/neutron-agent.ini << EOF\n[DEFAULT]\nhost = $(hostname --fqdn)\nEOF\n{{- end }}\n\nif $BESTEFFORT; then\n  exit 0\nfi\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-sriov-agent.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec neutron-sriov-nic-agent \\\n  --config-file /etc/neutron/neutron.conf \\\n  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \\\n{{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}\n  --config-file /tmp/pod-shared/neutron-agent.ini \\\n{{- end }}\n{{- if .Values.conf.plugins.taas.taas.enabled }}\n  --config-file /etc/neutron/plugins/ml2/taas.ini \\\n{{- end }}\n  --config-file /etc/neutron/plugins/ml2/sriov_agent.ini \\\n  --config-dir /etc/neutron/neutron.conf.d\n"
  },
  {
    "path": "neutron/templates/bin/_neutron-test-force-cleanup.sh.tpl",
    "content": "#!/bin/bash\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\n\n\nif openstack project show \"${OS_TEST_PROJECT_NAME}\" --domain=\"${OS_TEST_PROJECT_DOMAIN_NAME}\" ; then\n  OS_TEST_PROJECT_ID=$(openstack project show \"${OS_TEST_PROJECT_NAME}\" -f value -c id --domain=\"${OS_TEST_PROJECT_DOMAIN_NAME}\")\n  ospurge --purge-project \"${OS_TEST_PROJECT_ID}\"\n  openstack quota set \"${OS_TEST_PROJECT_ID}\" --networks \"${NETWORK_QUOTA}\" --ports \"${PORT_QUOTA}\" --routers \"${ROUTER_QUOTA}\" --subnets \"${SUBNET_QUOTA}\" --secgroups \"${SEC_GROUP_QUOTA}\"\nfi\n"
  },
  {
    "path": "neutron/templates/bin/_nginx.sh.tpl",
    "content": "#!/bin/sh\nset -xe\n\nCOMMAND=\"${@:-start}\"\n\nstart () {\n  envsubst < /etc/nginx/nginx.conf > /tmp/nginx.conf\n  cat /tmp/nginx.conf\n  nginx -t -c /tmp/nginx.conf\n  exec nginx -c /tmp/nginx.conf\n}\n\nstop () {\n  nginx -s stop\n}\n\n$COMMAND\n"
  },
  {
    "path": "neutron/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.certificates -}}\n{{  dict \"envAll\" . \"service\" \"network\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "neutron/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $rallyTests := .Values.conf.rally_tests }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: neutron-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  rally-test.sh: |\n{{ tuple $rallyTests | include \"helm-toolkit.scripts.rally_test\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  health-probe.py: |\n{{ tuple \"bin/_health-probe.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-dhcp-agent.sh: |\n{{ tuple \"bin/_neutron-dhcp-agent.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-dhcp-agent-init.sh: |\n{{ tuple \"bin/_neutron-dhcp-agent-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-l3-agent.sh: |\n{{ tuple \"bin/_neutron-l3-agent.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-l3-agent-init.sh: |\n{{ tuple \"bin/_neutron-l3-agent-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-linuxbridge-agent.sh: |\n{{ tuple \"bin/_neutron-linuxbridge-agent.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-linuxbridge-agent-init.sh: |\n{{ tuple \"bin/_neutron-linuxbridge-agent-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-linuxbridge-agent-init-modules.sh: |\n{{ tuple \"bin/_neutron-linuxbridge-agent-init-modules.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-openvswitch-agent.sh: |\n{{ tuple \"bin/_neutron-openvswitch-agent.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-openvswitch-agent-init.sh: |\n{{ tuple \"bin/_neutron-openvswitch-agent-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-openvswitch-agent-init-modules.sh: |\n{{ tuple \"bin/_neutron-openvswitch-agent-init-modules.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- if .Values.conf.netoffload.enabled }}\n  neutron-openvswitch-agent-init-netoffload.sh: |\n{{ tuple \"bin/_neutron-openvswitch-agent-init-netoffload.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  neutron-openvswitch-agent-readiness.sh: |\n{{ tuple \"bin/_neutron-openvswitch-agent-readiness.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-openvswitch-agent-liveness.sh: |\n{{ tuple \"bin/_neutron-openvswitch-agent-liveness.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-sriov-agent.sh: |\n{{ tuple \"bin/_neutron-sriov-agent.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-sriov-agent-init.sh: |\n{{ tuple \"bin/_neutron-sriov-agent-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-l2gw-agent.sh: |\n{{ tuple \"bin/_neutron-l2gw-agent.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-bagpipe-bgp.sh: |\n{{ tuple \"bin/_neutron-bagpipe-bgp.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-bagpipe-bgp-init.sh: |\n{{ tuple \"bin/_neutron-bagpipe-bgp-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-bgp-dragent.sh: |\n{{ tuple \"bin/_neutron-bgp-dragent.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- if .Values.manifests.certificates }}\n  nginx.sh: |\n{{ tuple \"bin/_nginx.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  neutron-server.sh: |\n{{ tuple \"bin/_neutron-server.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-rpc-server.sh: |\n{{ tuple \"bin/_neutron-rpc-server.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-ironic-agent-init.sh: |\n{{ tuple \"bin/_neutron-ironic-agent-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-ironic-agent.sh: |\n{{ tuple \"bin/_neutron-ironic-agent.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-netns-cleanup-cron.sh: |\n{{ tuple \"bin/_neutron-netns-cleanup-cron.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n  neutron-test-force-cleanup.sh: |\n{{ tuple \"bin/_neutron-test-force-cleanup.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  neutron-metadata-agent-init.sh: |\n{{ tuple \"bin/_neutron-metadata-agent-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- if ( has \"ovn\" .Values.network.backend ) }}\n  neutron-ovn-db-sync.sh: |\n{{ tuple \"bin/_neutron-ovn-db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-ovn-metadata-agent.sh: |\n{{ tuple \"bin/_neutron-ovn-metadata-agent.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-ovn-init.sh: |\n{{ tuple \"bin/_neutron-ovn-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-ovn-vpn-agent-init.sh: |\n{{ tuple \"bin/_neutron-ovn-vpn-agent-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  neutron-ovn-vpn-agent.sh: |\n{{ tuple \"bin/_neutron-ovn-vpn-agent.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- else }}\n  neutron-metadata-agent.sh: |\n{{ tuple \"bin/_neutron-metadata-agent.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- define \"neutron.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if empty $envAll.Values.conf.neutron.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set $envAll.Values.conf.neutron.keystone_authtoken \"auth_uri\" -}}\n{{- end }}\n\n{{- if empty $envAll.Values.conf.neutron.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set $envAll.Values.conf.neutron.keystone_authtoken \"auth_url\" -}}\n{{- end }}\n\n\n{{- if empty $envAll.Values.conf.neutron.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set $envAll.Values.conf.neutron.keystone_authtoken \"memcached_servers\" -}}\n{{- end }}\n{{- if empty .Values.conf.neutron.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.neutron.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" $envAll.Values.conf.neutron.database.connection)) (empty $envAll.Values.conf.neutron.database.connection) -}}\n{{- $connection := tuple \"oslo_db\" \"internal\" \"neutron\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.neutron.database \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.neutron.database \"connection\" $connection -}}\n{{- end -}}\n{{- end }}\n\n{{- if empty $envAll.Values.conf.neutron.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"neutron\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set $envAll.Values.conf.neutron.DEFAULT \"transport_url\" -}}\n{{- end }}\n\n{{- if empty $envAll.Values.conf.neutron.nova.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set $envAll.Values.conf.neutron.nova \"auth_url\" -}}\n{{- end }}\n\n\n{{- if empty $envAll.Values.conf.neutron.placement.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set $envAll.Values.conf.neutron.placement \"auth_url\" -}}\n{{- end }}\n\n\n{{- if empty $envAll.Values.conf.neutron.octavia.base_url -}}\n{{- $_ := tuple \"load_balancer\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set $envAll.Values.conf.neutron.octavia \"base_url\" -}}\n{{- end }}\n\n{{- if empty $envAll.Values.conf.metadata_agent.DEFAULT.nova_metadata_host -}}\n{{- $_ := tuple \"compute_metadata\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" | set $envAll.Values.conf.metadata_agent.DEFAULT \"nova_metadata_host\" -}}\n{{- end -}}\n{{- if empty $envAll.Values.conf.metadata_agent.DEFAULT.nova_metadata_port -}}\n{{- $_ := tuple \"compute_metadata\" \"internal\" \"metadata\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set $envAll.Values.conf.metadata_agent.DEFAULT \"nova_metadata_port\" }}\n{{- end -}}\n{{- if empty $envAll.Values.conf.metadata_agent.cache.memcache_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set $envAll.Values.conf.metadata_agent.cache \"memcache_servers\" -}}\n{{- end -}}\n\n{{- if empty $envAll.Values.conf.ovn_metadata_agent.DEFAULT.nova_metadata_host -}}\n{{- $_ := tuple \"compute_metadata\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" | set $envAll.Values.conf.ovn_metadata_agent.DEFAULT \"nova_metadata_host\" -}}\n{{- end -}}\n{{- if empty $envAll.Values.conf.ovn_metadata_agent.cache.memcache_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set $envAll.Values.conf.ovn_metadata_agent.cache \"memcache_servers\" -}}\n{{- end -}}\n{{- if empty $envAll.Values.conf.ovn_metadata_agent.DEFAULT.nova_metadata_port -}}\n{{- $_ := tuple \"compute_metadata\" \"internal\" \"metadata\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set $envAll.Values.conf.ovn_metadata_agent.DEFAULT \"nova_metadata_port\" }}\n{{- end -}}\n\n{{- if empty $envAll.Values.conf.neutron.DEFAULT.interface_driver -}}\n{{- $_ := set $envAll.Values \"__interface_driver\" ( list ) }}\n{{- if ( has \"openvswitch\" $envAll.Values.network.backend ) -}}\n{{ $__interface_driver := append $envAll.Values.__interface_driver \"openvswitch\" }}\n{{- $_ := set $envAll.Values \"__interface_driver\" $__interface_driver }}\n{{- end -}}\n{{- if ( has \"linuxbridge\" $envAll.Values.network.backend ) -}}\n{{ $__interface_driver := append $envAll.Values.__interface_driver \"linuxbridge\" }}\n{{- $_ := set $envAll.Values \"__interface_driver\" $__interface_driver }}\n{{- end -}}\n{{- $_ := set $envAll.Values.conf.neutron.DEFAULT \"interface_driver\" $envAll.Values.__interface_driver -}}\n{{- end -}}\n\n{{- if empty $envAll.Values.conf.dhcp_agent.DEFAULT.interface_driver -}}\n{{- $_ := set $envAll.Values \"__interface_driver\" ( list ) }}\n{{- if or ( has \"openvswitch\" $envAll.Values.network.backend ) ( has \"ovn\" $envAll.Values.network.backend ) -}}\n{{ $__interface_driver := append $envAll.Values.__interface_driver \"openvswitch\" }}\n{{- $_ := set $envAll.Values \"__interface_driver\" $__interface_driver }}\n{{- end -}}\n{{- if ( has \"linuxbridge\" $envAll.Values.network.backend ) -}}\n{{ $__interface_driver := append $envAll.Values.__interface_driver \"linuxbridge\" }}\n{{- $_ := set $envAll.Values \"__interface_driver\" $__interface_driver }}\n{{- end -}}\n{{- $_ := set $envAll.Values.conf.dhcp_agent.DEFAULT \"interface_driver\" $envAll.Values.__interface_driver 
-}}\n{{- end -}}\n{{- if and (has \"ovn\" $envAll.Values.network.backend) (empty $envAll.Values.conf.dhcp_agent.ovs.ovsdb_connection) -}}\n{{- $_ := set $envAll.Values.conf.dhcp_agent.ovs \"ovsdb_connection\" \"unix:/run/openvswitch/db.sock\" -}}\n{{- end -}}\n\n{{- if empty $envAll.Values.conf.l3_agent.DEFAULT.interface_driver -}}\n{{- $_ := set $envAll.Values \"__interface_driver\" ( list ) }}\n{{- if ( has \"openvswitch\" $envAll.Values.network.backend ) -}}\n{{ $__interface_driver := append $envAll.Values.__interface_driver \"openvswitch\" }}\n{{- $_ := set $envAll.Values \"__interface_driver\" $__interface_driver }}\n{{- end -}}\n{{- if ( has \"linuxbridge\" $envAll.Values.network.backend ) -}}\n{{ $__interface_driver := append $envAll.Values.__interface_driver \"linuxbridge\" }}\n{{- $_ := set $envAll.Values \"__interface_driver\" $__interface_driver }}\n{{- end -}}\n{{- $_ := set $envAll.Values.conf.l3_agent.DEFAULT \"interface_driver\" $envAll.Values.__interface_driver -}}\n{{- end -}}\n\n{{- if empty $envAll.Values.conf.plugins.ml2_conf.ml2.mechanism_drivers -}}\n{{- if (contains \"vxlan\" $envAll.Values.conf.plugins.ml2_conf.ml2.tenant_network_types) -}}\n{{- $_ := set $envAll.Values \"__mechanism_drivers\" (append $envAll.Values.network.backend \"l2population\") -}}\n{{- end -}}\n{{- $_ := set $envAll.Values.conf.plugins.ml2_conf.ml2 \"mechanism_drivers\" ($envAll.Values.__mechanism_drivers | default $envAll.Values.network.backend | uniq) -}}\n{{- end -}}\n\n{{- if empty .Values.conf.neutron.DEFAULT.bind_port -}}\n{{- $_ := tuple \"network\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.neutron.DEFAULT \"bind_port\" -}}\n{{- end -}}\n{{- if empty .Values.conf.neutron_api_uwsgi.uwsgi.processes -}}\n{{- $_ := set .Values.conf.neutron_api_uwsgi.uwsgi \"processes\" .Values.conf.neutron.DEFAULT.api_workers -}}\n{{- end -}}\n{{- if empty (index .Values.conf.neutron_api_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"network\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.neutron_api_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .deployment_name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n\n{{- if .Values.conf.ovs_dpdk.enabled -}}\n{{- $_ := set $envAll.Values.conf.plugins.openvswitch_agent.ovs \"datapath_type\" \"netdev\" -}}\n{{- if empty $envAll.Values.conf.plugins.openvswitch_agent.ovs.vhostuser_socket_dir -}}\n{{- $_ := set $envAll.Values.conf.plugins.openvswitch_agent.ovs 
\"vhostuser_socket_dir\" \"/run/openvswitch/vhostuser\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/* Designate DNS driver */}}\n{{- if eq (.Values.conf.neutron.DEFAULT.external_dns_driver | default \"\") \"designate\" -}}\n{{- if empty .Values.conf.neutron.designate.auth_url -}}\n{{- $_ := tuple \"dns\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.neutron.designate \"auth_url\" -}}\n{{- end -}}\n{{- end }}\n\n{{- if (has \"baremetal\" .Values.network.backend) -}}\n{{- if empty $envAll.Values.conf.neutron.ironic.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set $envAll.Values.conf.neutron.ironic \"auth_url\" -}}\n{{- end }}\n{{- end -}}\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $configMapName }}\ntype: Opaque\ndata:\n  rally_tests.yaml: {{ toYaml $envAll.Values.conf.rally_tests.tests | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" $envAll.Values.conf.paste | b64enc }}\n  policy.yaml: {{ toYaml $envAll.Values.conf.policy | b64enc }}\n  neutron-api-uwsgi.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.neutron_api_uwsgi | b64enc }}\n  neutron.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.neutron | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  api_audit_map.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.api_audit_map | b64enc }}\n  dhcp_agent.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.dhcp_agent | b64enc }}\n  l3_agent.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.l3_agent | b64enc }}\n  metering_agent.ini: {{ default \"\\\"\\\"\" (include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.metering_agent | b64enc) }}\n  taas_plugin.ini: {{ default \"\\\"\\\"\" (include \"helm-toolkit.utils.to_oslo_conf\" 
$envAll.Values.conf.taas_plugin | b64enc) }}\n  ml2_conf.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.plugins.ml2_conf | b64enc }}\n  ml2_conf_sriov.ini: {{ default \"\\\"\\\"\" (include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.plugins.ml2_conf_sriov | b64enc) }}\n  taas.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.plugins.taas | b64enc }}\n  l2gw_plugin.ini: {{ default \"\\\"\\\"\" (include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.plugins.l2gateway | b64enc) }}\n  macvtap_agent.ini: {{ default \"\\\"\\\"\" (include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.plugins.macvtap_agent | b64enc) }}\n  linuxbridge_agent.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.plugins.linuxbridge_agent | b64enc }}\n  openvswitch_agent.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.plugins.openvswitch_agent | b64enc }}\n  sriov_agent.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.plugins.sriov_agent | b64enc }}\n  l2gw_agent.ini: {{ default \"\\\"\\\"\" (include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.l2gateway_agent | b64enc) }}\n  bagpipe_bgp.conf: {{ default \"\\\"\\\"\" (include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.bagpipe_bgp | b64enc) }}\n  bgp_dragent.ini: {{ default \"\\\"\\\"\" (include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.bgp_dragent | b64enc) }}\n  dnsmasq.conf: \"{{ $envAll.Values.conf.dnsmasq | b64enc }}\"\n  neutron_sudoers: {{ $envAll.Values.conf.neutron_sudoers | b64enc }}\n  rootwrap.conf: {{ $envAll.Values.conf.rootwrap | b64enc }}\n  auto_bridge_add: {{ toJson $envAll.Values.conf.auto_bridge_add | b64enc }}\n  neutron_vpnaas.conf: {{ default \"\\\"\\\"\" (include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.neutron_vpnaas | b64enc) }}\n{{- if .Values.conf.netoffload.enabled }}\n  netoffload: {{ toJson $envAll.Values.conf.netoffload | b64enc }}\n{{- end }}\n  
dpdk.conf: {{ toJson $envAll.Values.conf.ovs_dpdk | b64enc }}\n  update_dpdk_bond_config: {{ $envAll.Values.conf.ovs_dpdk.update_dpdk_bond_config | toString | b64enc }}\n{{- if ( has \"ovn\" .Values.network.backend ) }}\n  neutron_ovn_vpn_agent.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.ovn_vpn_agent | b64enc }}\n  ovn_metadata_agent.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.ovn_metadata_agent | b64enc }}\n{{- else }}\n  metadata_agent.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" $envAll.Values.conf.metadata_agent | b64enc }}\n{{- end }}\n\n{{-  if .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" ( dict \"envAll\" $envAll \"template\" .Values.conf.nginx \"key\" \"nginx.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end }}\n{{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n{{- $filePrefix := replace \"_\" \"-\"  $key }}\n  {{ printf \"%s.filters\" $filePrefix }}: {{ $value.content | b64enc }}\n{{- end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- list \"neutron-etc\" . | include \"neutron.configmap.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/cron-job-ovn-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_job_ovn_db_sync }}\n{{- $envAll := . }}\n\n{{- $mounts_neutron_ovn_db_sync := .Values.pod.mounts.neutron_ovn_db_sync.neutron_ovn_db_sync }}\n{{- $mounts_neutron_ovn_db_sync_init := .Values.pod.mounts.neutron_ovn_db_sync.init_container }}\n{{- $etcSources := .Values.pod.etcSources.neutron_ovn_db_sync }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"neutron-ovn-db-sync\" }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: neutron-ovn-db-sync\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  schedule: {{ .Values.jobs.ovn_db_sync.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.ovn_db_sync.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.ovn_db_sync.history.failed }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"ovn-db-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"neutron-ovn-db-sync\" \"containerNames\" (list \"init\" \"neutron-ovn-db-sync\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"neutron\" \"ovn-db-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n          annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 12 }}\n{{ dict \"envAll\" $envAll \"podName\" \"neutron-ovn-db-sync\" \"containerNames\" (list \"init\" \"neutron-ovn-db-sync\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n        spec:\n{{ tuple \"neutron_ovn_db_sync\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"neutron_ovn_db_sync\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n          serviceAccountName: {{ $serviceAccountName }}\n          restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n          initContainers:\n{{ tuple $envAll \"ovn_db_sync\" $mounts_neutron_ovn_db_sync_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n            - name: ovn-neutron-init\n{{ tuple $envAll \"neutron_ovn_db_sync\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n              command:\n                - /tmp/neutron-ovn-init.sh\n              volumeMounts:\n                - name: pod-shared\n                  mountPath: /tmp/pod-shared\n                - name: neutron-bin\n                  mountPath: /tmp/neutron-ovn-init.sh\n                  subPath: neutron-ovn-init.sh\n                  readOnly: true\n          containers:\n            - name: neutron-ovn-db-sync\n{{ tuple $envAll \"neutron_ovn_db_sync\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ovn_db_sync | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n              env:\n                - name: REQUESTS_CA_BUNDLE\n                  value: \"/etc/neutron/certs/ca.crt\"\n{{- end }}\n              command:\n                - /tmp/neutron-ovn-db-sync.sh\n                - {{ quote .Values.jobs.ovn_db_sync.sync_mode }}\n              volumeMounts:\n                - name: neutron-bin\n                  mountPath: /tmp/neutron-ovn-db-sync.sh\n                  subPath: neutron-ovn-db-sync.sh\n                  readOnly: true\n             
   - name: pod-tmp\n                  mountPath: /tmp\n                - name: pod-shared\n                  mountPath: /tmp/pod-shared\n                - name: pod-var-neutron\n                  mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n                - name: neutron-etc\n                  mountPath: /etc/neutron/neutron.conf\n                  subPath: neutron.conf\n                  readOnly: true\n                - name: neutron-etc-snippets\n                  mountPath: /etc/neutron/neutron.conf.d/\n                  readOnly: true\n                - name: neutron-etc\n                  mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n                  subPath: ml2_conf.ini\n                  readOnly: true\n                {{ if ( has \"sriov\" .Values.network.backend ) }}\n                - name: neutron-etc\n                  mountPath: /etc/neutron/plugins/ml2/sriov_agent.ini\n                  subPath: sriov_agent.ini\n                  readOnly: true\n                {{ end }}\n                {{- if .Values.conf.plugins.taas.taas.enabled }}\n                - name: neutron-etc\n                  mountPath: /etc/neutron/taas_plugin.ini\n                  subPath: taas_plugin.ini\n                  readOnly: true\n                {{ end }}\n                {{- if .Values.conf.plugins.l2gateway }}\n                - name: neutron-etc\n                  mountPath: /etc/neutron/l2gw_plugin.ini\n                  subPath: l2gw_plugin.ini\n                  readOnly: true\n                {{ end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.network.server.internal \"path\" \"/etc/neutron/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 
}}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n{{ if $mounts_neutron_ovn_db_sync.volumeMounts }}{{ toYaml $mounts_neutron_ovn_db_sync.volumeMounts | indent 14 }}{{ end }}\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: pod-shared\n              emptyDir: {}\n            {{- if .Values.manifests.certificates }}\n            - name: wsgi-neutron\n              emptyDir: {}\n            {{- end }}\n            - name: pod-var-neutron\n              emptyDir: {}\n            - name: neutron-bin\n              configMap:\n                name: neutron-bin\n                defaultMode: 0555\n            - name: neutron-etc\n              secret:\n                secretName: neutron-etc\n                defaultMode: 0444\n            - name: neutron-etc-snippets\n{{- if $etcSources }}\n              projected:\n                sources:\n{{ toYaml $etcSources | indent 18 }}\n{{- else }}\n              emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.network.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{ if $mounts_neutron_ovn_db_sync.volumes }}{{ toYaml $mounts_neutron_ovn_db_sync.volumes | indent 12 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/daemonset-bagpipe-bgp.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"bagpipeBgpLivenessProbeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"network\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- define \"bagpipeBgpReadinessProbeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"network\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- define \"neutron.bagpipe_bgp.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 
3 }}\n{{- with $envAll }}\n\n{{- $mounts_neutron_bagpipe_bgp := .Values.pod.mounts.bagpipe_bgp.bagpipe_bgp }}\n{{- $mounts_neutron_bagpipe_bgp_init := .Values.pod.mounts.bagpipe_bgp.init_container }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: bagpipe-bgp\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"bagpipe-bgp\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"bagpipe-bgp\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"bagpipe_bgp\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"bagpipe-bgp\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"neutron_bagpipe_bgp\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"neutron_bagpipe_bgp\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"bagpipe_bgp\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"bagpipe_bgp\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.bagpipe_bgp.node_selector_key }}: {{ .Values.labels.bagpipe_bgp.node_selector_value }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n      shareProcessNamespace: true\n      {{- else }}\n      hostPID: true\n      {{- end }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_neutron_bagpipe_bgp_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: neutron-bagpipe-bgp-init\n{{ tuple $envAll \"neutron_bagpipe_bgp\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          securityContext:\n            privileged: true\n            runAsUser: 0\n          command:\n            - /tmp/neutron-bagpipe-bgp-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: neutron-bin\n              mountPath: /tmp/neutron-bagpipe-bgp-init.sh\n              subPath: neutron-bagpipe-bgp-init.sh\n              readOnly: true\n            - name: run\n              mountPath: /run\n      containers:\n        - name: neutron-bagpipe-bgp\n{{ tuple $envAll \"neutron_bagpipe_bgp\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.bagpipe_bgp | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_bagpipe_bgp\" \"container\" \"neutron_bagpipe_bgp\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict 
\"envAll\" $envAll \"component\" \"bagpipe_bgp\" \"container\" \"bagpipe_bgp\" \"type\" \"liveness\" \"probeTemplate\" (include \"bagpipeBgpLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"bagpipe_bgp\" \"container\" \"bagpipe_bgp\" \"type\" \"readiness\" \"probeTemplate\" (include \"bagpipeBgpReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/neutron-bagpipe-bgp.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: pod-var-neutron\n              mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n            - name: neutron-bin\n              mountPath: /tmp/neutron-bagpipe-bgp.sh\n              subPath: neutron-bagpipe-bgp.sh\n              readOnly: true\n            - name: neutron-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /tmp/auto_bridge_add\n              subPath: auto_bridge_add\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/bagpipe-bgp/bgp.conf\n              subPath: bagpipe_bgp.conf\n              readOnly: true\n            - name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n 
             readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"bagpipe_bgp\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            - name: libmodules\n              mountPath: /lib/modules\n              readOnly: true\n            - name: run\n              mountPath: /run\n{{ if $mounts_neutron_bagpipe_bgp.volumeMounts }}{{ toYaml $mounts_neutron_bagpipe_bgp.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: libmodules\n          hostPath:\n            path: /lib/modules\n        - name: run\n          hostPath:\n            path: /run\n{{ if $mounts_neutron_bagpipe_bgp.volumes }}{{ toYaml $mounts_neutron_bagpipe_bgp.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_bagpipe_bgp }}\n{{- $envAll := . 
}}\n{{- $daemonset := \"bagpipe-bgp\" }}\n{{- $configMapName := \"neutron-etc\" }}\n{{- $serviceAccountName := \"neutron-bagpipe-bgp\" }}\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"bagpipe_bgp\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"neutron.bagpipe_bgp.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"neutron.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/daemonset-bgp-dragent.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"bgp_dragentLivenessProbeTemplate\" }}\nexec:\n  command:\n    - bash\n    - -c\n    - pidof -x /var/lib/openstack/bin/neutron-bgp-dragent\n{{- end }}\n\n\n{{- define \"neutron.bgp_dragent.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 3 }}\n{{- with $envAll }}\n\n{{- $mounts_neutron_bgp_dragent := .Values.pod.mounts.bgp_dragent.bgp_dragent }}\n{{- $mounts_neutron_bgp_dragent_init := .Values.pod.mounts.bgp_dragent.init_container }}\n{{- $etcSources := .Values.pod.etcSources.bgp_dragent }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: bgp-dragent\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"bgp-dragent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"bgp-dragent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"bgp_dragent\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"bgp-dragent\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"neutron_bgp_dragent\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"neutron_bgp_dragent\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"bgp_dragent\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"bgp_dragent\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.bgp_dragent.node_selector_key }}: {{ .Values.labels.bgp_dragent.node_selector_value }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n      shareProcessNamespace: true\n      {{- else }}\n      hostPID: true\n      {{- end }}\n      containers:\n        - name: neutron-bgp-dragent\n{{ tuple $envAll \"neutron_bgp_dragent\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.bgp_dragent | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_bgp_dragent\" \"container\" \"neutron_bgp_dragent\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" 
$envAll \"component\" \"bgp_dragent\" \"container\" \"bgp_dragent\" \"type\" \"liveness\" \"probeTemplate\" (include \"bgp_dragentLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/neutron-bgp-dragent.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: pod-var-neutron\n              mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n            - name: neutron-bin\n              mountPath: /tmp/neutron-bgp-dragent.sh\n              subPath: neutron-bgp-dragent.sh\n              readOnly: true\n            - name: neutron-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/bgp_dragent.ini\n              subPath: bgp_dragent.ini\n              readOnly: true\n            - name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              
subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"bgp_dragent\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            - name: libmodules\n              mountPath: /lib/modules\n              readOnly: true\n            - name: run\n              mountPath: /run\n{{ if $mounts_neutron_bgp_dragent.volumeMounts }}{{ toYaml $mounts_neutron_bgp_dragent.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: neutron-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: libmodules\n          hostPath:\n            path: /lib/modules\n        - name: run\n          hostPath:\n            path: /run\n{{ if $mounts_neutron_bgp_dragent.volumes }}{{ toYaml $mounts_neutron_bgp_dragent.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_bgp_dragent }}\n{{- $envAll := . 
}}\n{{- $daemonset := \"bgp-dragent\" }}\n{{- $configMapName := \"neutron-etc\" }}\n{{- $serviceAccountName := \"neutron-bgp-dragent\" }}\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"bgp_dragent\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"neutron.bgp_dragent.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"neutron.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/daemonset-dhcp-agent.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"dhcpAgentReadinessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/neutron/neutron.conf\n    - --config-file\n    - /etc/neutron/dhcp_agent.ini\n    - --agent-queue-name\n    - dhcp_agent\n{{- if .Values.pod.use_fqdn.neutron_agent }}\n    - --use-fqdn\n{{- end }}\n{{- end }}\n{{- define \"dhcpAgentLivenessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/neutron/neutron.conf\n    - --config-file\n    - /etc/neutron/dhcp_agent.ini\n    - --agent-queue-name\n    - dhcp_agent\n{{- if .Values.pod.use_fqdn.neutron_agent }}\n    - --use-fqdn\n{{- end }}\n{{- end }}\n\n{{- define \"neutron.dhcp_agent.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 
3 }}\n{{- with $envAll }}\n\n{{- $mounts_neutron_dhcp_agent := .Values.pod.mounts.neutron_dhcp_agent.neutron_dhcp_agent }}\n{{- $mounts_neutron_dhcp_agent_init := .Values.pod.mounts.neutron_dhcp_agent.init_container }}\n{{- $etcSources := .Values.pod.etcSources.neutron_dhcp_agent }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: neutron-dhcp-agent\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"dhcp-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"dhcp-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"dhcp_agent\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"dhcp-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"neutron_dhcp_agent\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"neutron-dhcp-agent-default\" \"containerNames\" (list \"neutron-dhcp-agent\" \"neutron-dhcp-agent-init\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"neutron_dhcp_agent\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"neutron_dhcp_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"neutron_dhcp_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.agent.dhcp.node_selector_key }}: {{ .Values.labels.agent.dhcp.node_selector_value }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n      shareProcessNamespace: true\n      {{- else }}\n      hostPID: true\n      {{- end }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_neutron_dhcp_agent_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        {{- if ( has \"ovn\" .Values.network.backend ) }}\n        - name: ovn-neutron-init\n{{ tuple $envAll \"neutron_dhcp\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          command:\n            - /tmp/neutron-ovn-init.sh\n          volumeMounts:\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: neutron-bin\n              mountPath: /tmp/neutron-ovn-init.sh\n              subPath: neutron-ovn-init.sh\n    
          readOnly: true\n        {{- end }}\n        - name: neutron-dhcp-agent-init\n{{ tuple $envAll \"neutron_dhcp\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.dhcp | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_dhcp_agent\" \"container\" \"neutron_dhcp_agent_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/neutron-dhcp-agent-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: neutron-bin\n              mountPath: /tmp/neutron-dhcp-agent-init.sh\n              subPath: neutron-dhcp-agent-init.sh\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/dhcp_agent.ini\n              subPath: dhcp_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/metadata_agent.ini\n              subPath: metadata_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/openvswitch_agent.ini\n              subPath: openvswitch_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using 
Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /tmp/auto_bridge_add\n              subPath: auto_bridge_add\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"dhcp_agent\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n      containers:\n        - name: neutron-dhcp-agent\n{{ tuple $envAll \"neutron_dhcp\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.dhcp | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_dhcp_agent\" \"container\" \"neutron_dhcp_agent\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: RPC_PROBE_TIMEOUT\n              value: \"{{ .Values.pod.probes.rpc_timeout }}\"\n            - name: RPC_PROBE_RETRIES\n              value: \"{{ .Values.pod.probes.rpc_retries }}\"\n{{ dict \"envAll\" $envAll \"component\" \"dhcp_agent\" \"container\" \"dhcp_agent\" \"type\" \"readiness\" \"probeTemplate\" (include \"dhcpAgentReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll 
\"component\" \"dhcp_agent\" \"container\" \"dhcp_agent\" \"type\" \"liveness\" \"probeTemplate\" (include \"dhcpAgentLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/neutron-dhcp-agent.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: pod-var-neutron\n              mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n            - name: neutron-bin\n              mountPath: /tmp/neutron-dhcp-agent.sh\n              subPath: neutron-dhcp-agent.sh\n              readOnly: true\n            - name: neutron-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            {{- if ( has \"openvswitch\" .Values.network.backend ) }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/openvswitch_agent.ini\n              subPath: openvswitch_agent.ini\n              readOnly: 
true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/dhcp_agent.ini\n              subPath: dhcp_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/dnsmasq.conf\n              subPath: dnsmasq.conf\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/metadata_agent.ini\n              subPath: metadata_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"dhcp_agent\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            - name: iptables-lockfile\n              mountPath: /run/xtables.lock\n            - name: socket\n              mountPath: /var/lib/neutron/openstack-helm\n            {{- if .Values.network.share_namespaces }}\n            - name: host-run-netns\n              mountPath: /run/netns\n              mountPropagation: Bidirectional\n            {{- end }}\n            {{- if ( has \"ovn\" .Values.network.backend ) }}\n            - name: run-openvswitch\n          
    mountPath: /run/openvswitch\n            {{- end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_neutron_dhcp_agent.volumeMounts }}{{ toYaml $mounts_neutron_dhcp_agent.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: neutron-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: iptables-lockfile\n          hostPath:\n            path: /run/xtables.lock\n            type: FileOrCreate\n        - name: socket\n          hostPath:\n            path: /var/lib/neutron/openstack-helm\n        - name: pod-shared\n          emptyDir: {}\n        {{- if .Values.network.share_namespaces }}\n        - name: host-run-netns\n          hostPath:\n            path: /run/netns\n        {{- end }}\n        {{- if ( has \"ovn\" .Values.network.backend ) }}\n        - name: run-openvswitch\n          hostPath:\n            path: /run/openvswitch\n        {{- end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_neutron_dhcp_agent.volumes }}{{ toYaml $mounts_neutron_dhcp_agent.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if 
.Values.manifests.daemonset_dhcp_agent }}\n{{- $envAll := . }}\n{{- $daemonset := \"dhcp-agent\" }}\n{{- $configMapName := \"neutron-etc\" }}\n{{- $serviceAccountName := \"neutron-dhcp-agent\" }}\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"dhcp\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"neutron.dhcp_agent.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"neutron.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/daemonset-l2gw-agent.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"l2gwAgentLivenessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/neutron/neutron.conf\n    - --config-file\n    - /etc/neutron/l2gw_agent.ini\n    - --agent-queue-name\n    - l2gateway_agent\n    - --liveness-probe\n{{- if .Values.pod.use_fqdn.neutron_agent }}\n    - --use-fqdn\n{{- end }}\n{{- end }}\n\n{{- define \"l2gwAgentReadinessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/neutron/neutron.conf\n    - --config-file\n    - /etc/neutron/l2gw_agent.ini\n    - --agent-queue-name\n    - l2gateway_agent\n{{- if .Values.pod.use_fqdn.neutron_agent }}\n    - --use-fqdn\n{{- end }}\n{{- end }}\n\n{{- define \"neutron.l2gw_agent.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 
3 }}\n{{- with $envAll }}\n\n{{- $mounts_neutron_l2gw_agent := .Values.pod.mounts.neutron_l2gw_agent.neutron_l2gw_agent }}\n{{- $mounts_neutron_l2gw_agent_init := .Values.pod.mounts.neutron_l2gw_agent.init_container }}\n{{- $etcSources := .Values.pod.etcSources.neutron_l2gw_agent }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: neutron-l2gw-agent\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"l2gw-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"l2gw-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"l2gw_agent\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"l2gw-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"neutron_l2gw_agent\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"neutron_l2gw_agent\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"neutron_l2gw_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"neutron_l2gw_agent\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.agent.l2gw.node_selector_key }}: {{ .Values.labels.agent.l2gw.node_selector_value }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n      shareProcessNamespace: true\n      {{- else }}\n      hostPID: true\n      {{- end }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_neutron_l2gw_agent_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: neutron-l2gw-agent\n{{ tuple $envAll \"neutron_l2gw\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.l2gw | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_l2gw_agent\" \"container\" \"neutron_l2gw_agent\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: RPC_PROBE_TIMEOUT\n              value: \"{{ .Values.pod.probes.rpc_timeout }}\"\n            - name: RPC_PROBE_RETRIES\n              value: \"{{ .Values.pod.probes.rpc_retries }}\"\n{{ dict \"envAll\" $envAll \"component\" \"l2gw_agent\" \"container\" \"l2gw_agent\" \"type\" \"liveness\" \"probeTemplate\" (include \"l2gwAgentLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"l2gw_agent\" \"container\" \"l2gw_agent\" \"type\" \"readiness\" \"probeTemplate\" (include 
\"l2gwAgentReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/neutron-l2gw-agent.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: pod-var-neutron\n              mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n            - name: neutron-bin\n              mountPath: /tmp/neutron-l2gw-agent.sh\n              subPath: neutron-l2gw-agent.sh\n              readOnly: true\n            - name: neutron-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/l2gw_agent.ini\n              subPath: l2gw_agent.ini\n              readOnly: true\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_neutron_l2gw_agent.volumeMounts }}{{ toYaml $mounts_neutron_l2gw_agent.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        
- name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: neutron-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_neutron_l2gw_agent.volumes }}{{ toYaml $mounts_neutron_l2gw_agent.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_l2gw_agent }}\n{{- $envAll := . }}\n{{- $daemonset := \"l2gw-agent\" }}\n{{- $configMapName := \"neutron-etc\" }}\n{{- $serviceAccountName := \"neutron-l2gw-agent\" }}\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"l2gateway\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"neutron.l2gw_agent.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"neutron.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/daemonset-l3-agent.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"l3AgentReadinessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/neutron/neutron.conf\n    - --config-file\n    - /etc/neutron/l3_agent.ini\n    - --agent-queue-name\n    - l3_agent\n{{- if .Values.pod.use_fqdn.neutron_agent }}\n    - --use-fqdn\n{{- end }}\n{{- end }}\n{{- define \"l3AgentLivenessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/neutron/neutron.conf\n    - --config-file\n    - /etc/neutron/l3_agent.ini\n    - --agent-queue-name\n    - l3_agent\n    - --liveness-probe\n{{- if .Values.pod.use_fqdn.neutron_agent }}\n    - --use-fqdn\n{{- end }}\n{{- end }}\n\n{{- define \"neutron.l3_agent.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 
3 }}\n{{- with $envAll }}\n\n{{- $mounts_neutron_l3_agent := .Values.pod.mounts.neutron_l3_agent.neutron_l3_agent }}\n{{- $mounts_neutron_l3_agent_init := .Values.pod.mounts.neutron_l3_agent.init_container }}\n{{- $etcSources := .Values.pod.etcSources.neutron_l3_agent }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: neutron-l3-agent\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"l3-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"l3-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"l3_agent\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"l3-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"neutron_l3_agent\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"neutron-l3-agent-default\" \"containerNames\" (list \"neutron-l3-agent\" \"init\"  \"neutron-l3-agent-init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"neutron_l3_agent\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"neutron_l3_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"neutron_l3_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.agent.l3.node_selector_key }}: {{ .Values.labels.agent.l3.node_selector_value }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n      shareProcessNamespace: true\n      {{- else }}\n      hostPID: true\n      {{- end }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_neutron_l3_agent_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: neutron-l3-agent-init\n{{ tuple $envAll \"neutron_l3\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.l3 | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_l3_agent\" \"container\" \"neutron_l3_agent_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - 
/tmp/neutron-l3-agent-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: neutron-bin\n              mountPath: /tmp/neutron-l3-agent-init.sh\n              subPath: neutron-l3-agent-init.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: neutron-etc\n              mountPath: /etc/neutron/l3_agent.ini\n              subPath: l3_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/metadata_agent.ini\n              subPath: metadata_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/openvswitch_agent.ini\n              subPath: openvswitch_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /tmp/auto_bridge_add\n              subPath: auto_bridge_add\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, 
$value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"l3_agent\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n      containers:\n        - name: neutron-l3-agent\n{{ tuple $envAll \"neutron_l3\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.l3 | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_l3_agent\" \"container\" \"neutron_l3_agent\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: RPC_PROBE_TIMEOUT\n              value: \"{{ .Values.pod.probes.rpc_timeout }}\"\n            - name: RPC_PROBE_RETRIES\n              value: \"{{ .Values.pod.probes.rpc_retries }}\"\n{{ dict \"envAll\" $envAll \"component\" \"l3_agent\" \"container\" \"l3_agent\" \"type\" \"readiness\" \"probeTemplate\" (include \"l3AgentReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"l3_agent\" \"container\" \"l3_agent\" \"type\" \"liveness\" \"probeTemplate\" (include \"l3AgentLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/neutron-l3-agent.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n          
  - name: pod-var-neutron\n              mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n            - name: neutron-bin\n              mountPath: /tmp/neutron-l3-agent.sh\n              subPath: neutron-l3-agent.sh\n              readOnly: true\n            - name: neutron-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            {{- if ( has \"openvswitch\" .Values.network.backend ) }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/openvswitch_agent.ini\n              subPath: openvswitch_agent.ini\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/l3_agent.ini\n              subPath: l3_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/metadata_agent.ini\n              subPath: metadata_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n      
        mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"l3_agent\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            - name: libmodules\n              mountPath: /lib/modules\n              readOnly: true\n            - name: iptables-lockfile\n              mountPath: /run/xtables.lock\n            - name: socket\n              mountPath: /var/lib/neutron/openstack-helm\n            {{- if .Values.network.share_namespaces }}\n            - name: host-run-netns\n              mountPath: /run/netns\n              mountPropagation: Bidirectional\n            {{- end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_neutron_l3_agent.volumeMounts }}{{ toYaml $mounts_neutron_l3_agent.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: {{ $configMapName }}\n       
     defaultMode: 0444\n        - name: neutron-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: libmodules\n          hostPath:\n            path: /lib/modules\n        - name: iptables-lockfile\n          hostPath:\n            path: /run/xtables.lock\n            type: FileOrCreate\n        - name: pod-shared\n          emptyDir: {}\n        - name: socket\n          hostPath:\n            path: /var/lib/neutron/openstack-helm\n        {{- if .Values.network.share_namespaces }}\n        - name: host-run-netns\n          hostPath:\n            path: /run/netns\n        {{- end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_neutron_l3_agent.volumes }}{{ toYaml $mounts_neutron_l3_agent.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_l3_agent }}\n{{- $envAll := . }}\n{{- $daemonset := \"l3-agent\" }}\n{{- $configMapName := \"neutron-etc\" }}\n{{- $serviceAccountName := \"neutron-l3-agent\" }}\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"l3\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"neutron.l3_agent.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"neutron.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/daemonset-lb-agent.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"lbAgentReadinessProbeTemplate\" }}\nexec:\n  command:\n    - bash\n    - -c\n    - 'brctl show'\n{{- end }}\n\n{{- define \"neutron.lb_agent.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 3 }}\n{{- with $envAll }}\n\n{{- $mounts_neutron_lb_agent := .Values.pod.mounts.neutron_lb_agent.neutron_lb_agent }}\n{{- $mounts_neutron_lb_agent_init := .Values.pod.mounts.neutron_lb_agent.init_container }}\n{{- $etcSources := .Values.pod.etcSources.neutron_lb_agent }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: neutron-lb-agent\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"neutron-lb-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"neutron-lb-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"lb_agent\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"neutron-lb-agent\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"neutron_lb_agent\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"neutron_lb_agent\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"neutron_lb_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"neutron_lb_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.lb.node_selector_key }}: {{ .Values.labels.lb.node_selector_value }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n      shareProcessNamespace: true\n      {{- else }}\n      hostPID: true\n      {{- end }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_neutron_lb_agent_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: neutron-lb-agent-kernel-modules\n{{ tuple $envAll \"neutron_linuxbridge_agent\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_lb_agent\" \"container\" \"neutron_lb_agent_kernel_modules\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | 
indent 10 }}\n          command:\n            - /tmp/neutron-linuxbridge-agent-init-modules.sh\n          volumeMounts:\n            - name: neutron-bin\n              mountPath: /tmp/neutron-linuxbridge-agent-init-modules.sh\n              subPath: neutron-linuxbridge-agent-init-modules.sh\n              readOnly: true\n            - name: host-rootfs\n              mountPath: /mnt/host-rootfs\n              mountPropagation: HostToContainer\n              readOnly: true\n        - name: neutron-lb-agent-init\n{{ tuple $envAll \"neutron_linuxbridge_agent\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.lb | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_lb_agent\" \"container\" \"neutron_lb_agent_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/neutron-linuxbridge-agent-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: neutron-bin\n              mountPath: /tmp/neutron-linuxbridge-agent-init.sh\n              subPath: neutron-linuxbridge-agent-init.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/linuxbridge_agent.ini\n              subPath: linuxbridge_agent.ini\n              readOnly: true\n            
- name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /tmp/auto_bridge_add\n              subPath: auto_bridge_add\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"lb_agent\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            - name: run\n              mountPath: /run\n{{ if $mounts_neutron_lb_agent.volumeMounts }}{{ toYaml $mounts_neutron_lb_agent.volumeMounts | indent 12 }}{{ end }}\n      containers:\n        - name: neutron-lb-agent\n{{ tuple $envAll \"neutron_linuxbridge_agent\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.lb | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_lb_agent\" \"container\" \"neutron_lb_agent\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"lb_agent\" \"container\" \"lb_agent\" \"type\" \"readiness\" \"probeTemplate\" (include \"lbAgentReadinessProbeTemplate\" $envAll | fromYaml) | include 
\"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/neutron-linuxbridge-agent.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: pod-var-neutron\n              mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n            - name: neutron-bin\n              mountPath: /tmp/neutron-linuxbridge-agent.sh\n              subPath: neutron-linuxbridge-agent.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/linuxbridge_agent.ini\n              subPath: linuxbridge_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n   
         - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"lb_agent\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            - name: run\n              mountPath: /run\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_neutron_lb_agent.volumeMounts }}{{ toYaml $mounts_neutron_lb_agent.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n        - name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: neutron-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: run\n          hostPath:\n            path: /run\n        - name: host-rootfs\n          hostPath:\n            path: /\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal 
| include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_neutron_lb_agent.volumes }}{{ toYaml $mounts_neutron_lb_agent.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if and .Values.manifests.daemonset_lb_agent ( has \"linuxbridge\" .Values.network.backend ) }}\n{{- $envAll := . }}\n{{- $daemonset := \"lb-agent\" }}\n{{- $configMapName := \"neutron-etc\" }}\n{{- $serviceAccountName := \"neutron-lb-agent\" }}\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"lb_agent\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"neutron.lb_agent.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"neutron.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/daemonset-metadata-agent.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadataAgentReadinessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/neutron/neutron.conf\n    - --config-file\n    - /etc/neutron/metadata_agent.ini\n{{- if .Values.pod.use_fqdn.neutron_agent }}\n    - --use-fqdn\n{{- end }}\n{{- end }}\n{{- define \"metadataAgentLivenessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/neutron/neutron.conf\n    - --config-file\n    - /etc/neutron/metadata_agent.ini\n    - --liveness-probe\n{{- if .Values.pod.use_fqdn.neutron_agent }}\n    - --use-fqdn\n{{- end }}\n{{- end }}\n\n{{- define \"neutron.metadata_agent.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 
3 }}\n{{- with $envAll }}\n\n{{- $mounts_neutron_metadata_agent := .Values.pod.mounts.neutron_metadata_agent.neutron_metadata_agent }}\n{{- $mounts_neutron_metadata_agent_init := .Values.pod.mounts.neutron_metadata_agent.init_container }}\n{{- $etcSources := .Values.pod.etcSources.neutron_metadata_agent }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: neutron-metadata-agent\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"metadata-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"metadata-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"metadata_agent\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"metadata-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"neutron_metadata_agent\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"neutron-metadata-agent-default\" \"containerNames\" (list \"neutron-metadata-agent\" \"neutron-metadata-agent-init\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"neutron_metadata_agent\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"neutron_metadata_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"neutron_metadata_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.agent.metadata.node_selector_key }}: {{ .Values.labels.agent.metadata.node_selector_value }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n      shareProcessNamespace: true\n      {{- else }}\n      hostPID: true\n      {{- end }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_neutron_metadata_agent_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: neutron-metadata-agent-init\n{{ tuple $envAll \"neutron_metadata\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.metadata | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_metadata_agent\" \"container\" \"neutron_metadata_agent_init\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: NEUTRON_USER_UID\n              value: \"{{ .Values.pod.security_context.neutron_metadata_agent.pod.runAsUser }}\"\n          command:\n            - /tmp/neutron-metadata-agent-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: neutron-bin\n              mountPath: /tmp/neutron-metadata-agent-init.sh\n              subPath: neutron-metadata-agent-init.sh\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            - name: socket\n              mountPath: /var/lib/neutron/openstack-helm\n      containers:\n        - name: neutron-metadata-agent\n{{ tuple $envAll \"neutron_metadata\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.metadata | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: RPC_PROBE_TIMEOUT\n              value: \"{{ .Values.pod.probes.rpc_timeout }}\"\n            - name: RPC_PROBE_RETRIES\n              value: \"{{ .Values.pod.probes.rpc_retries }}\"\n{{ dict \"envAll\" $envAll \"component\" \"metadata_agent\" \"container\" \"metadata_agent\" \"type\" \"readiness\" \"probeTemplate\" (include \"metadataAgentReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"metadata_agent\" \"container\" \"metadata_agent\" \"type\" \"liveness\" \"probeTemplate\" (include \"metadataAgentLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          
securityContext:\n            privileged: true\n          command:\n            - /tmp/neutron-metadata-agent.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: pod-var-neutron\n              mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n            - name: neutron-bin\n              mountPath: /tmp/neutron-metadata-agent.sh\n              subPath: neutron-metadata-agent.sh\n              readOnly: true\n            - name: neutron-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            {{- if ( has \"openvswitch\" .Values.network.backend ) }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/openvswitch_agent.ini\n              subPath: openvswitch_agent.ini\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/metadata_agent.ini\n              subPath: metadata_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              # NOTE 
(Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"metadata_agent\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            - name: socket\n              mountPath: /var/lib/neutron/openstack-helm\n            {{- if .Values.network.share_namespaces }}\n            - name: host-run-netns\n              mountPath: /run/netns\n              mountPropagation: Bidirectional\n            {{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.compute_metadata.metadata.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_neutron_metadata_agent.volumeMounts }}{{ toYaml $mounts_neutron_metadata_agent.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - 
name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: neutron-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: socket\n          hostPath:\n            path: /var/lib/neutron/openstack-helm\n        {{- if .Values.network.share_namespaces }}\n        - name: host-run-netns\n          hostPath:\n            path: /run/netns\n        {{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.compute_metadata.metadata.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_neutron_metadata_agent.volumes }}{{ toYaml $mounts_neutron_metadata_agent.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_metadata_agent }}\n{{- $envAll := . }}\n{{- $daemonset := \"metadata-agent\" }}\n{{- $configMapName := \"neutron-etc\" }}\n{{- $serviceAccountName := \"neutron-metadata-agent\" }}\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"metadata\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . 
| include \"neutron.metadata_agent.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"neutron.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/daemonset-netns-cleanup-cron.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"neutron.netns_cleanup_cron.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 3 }}\n{{- with $envAll }}\n\n{{- $mounts_neutron_netns_cleanup_cron := .Values.pod.mounts.neutron_netns_cleanup_cron.neutron_netns_cleanup_cron }}\n{{- $mounts_neutron_netns_cleanup_cron_init := .Values.pod.mounts.neutron_netns_cleanup_cron.init_container }}\n{{- $etcSources := .Values.pod.etcSources.neutron_netns_cleanup_cron }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: neutron-netns-cleanup-cron\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"netns-cleanup-cron\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"netns-cleanup-cron\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"netns_cleanup_cron\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"netns-cleanup-cron\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"neutron_netns_cleanup_cron\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"neutron-netns-cleanup-cron-default\" \"containerNames\" (list \"neutron-netns-cleanup-cron\" \"init\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"neutron_netns_cleanup_cron\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"neutron_netns_cleanup_cron\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"neutron_netns_cleanup_cron\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.netns_cleanup_cron.node_selector_key }}: {{ .Values.labels.netns_cleanup_cron.node_selector_value }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n      shareProcessNamespace: true\n      {{- else }}\n      hostPID: true\n      {{- end }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_neutron_netns_cleanup_cron_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: neutron-netns-cleanup-cron\n{{ tuple $envAll \"neutron_netns_cleanup_cron\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.netns_cleanup_cron | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_netns_cleanup_cron\" \"container\" \"neutron_netns_cleanup_cron\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/neutron-netns-cleanup-cron.sh\n          env:\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.admin \"useCA\" false }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: neutron-bin\n              mountPath: 
/tmp/neutron-netns-cleanup-cron.sh\n              subPath: neutron-netns-cleanup-cron.sh\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/dhcp_agent.ini\n              subPath: dhcp_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/l3_agent.ini\n              subPath: l3_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"netns_cleanup_cron\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: 
true\n            {{- end }}\n            {{- end }}\n            - name: libmodules\n              mountPath: /lib/modules\n              readOnly: true\n            - name: iptables-lockfile\n              mountPath: /run/xtables.lock\n            - name: socket\n              mountPath: /var/lib/neutron/openstack-helm\n            {{- if .Values.network.share_namespaces }}\n            - name: host-run-netns\n              mountPath: /run/netns\n              mountPropagation: Bidirectional\n            {{- end }}\n{{ if $mounts_neutron_netns_cleanup_cron.volumeMounts }}{{ toYaml $mounts_neutron_netns_cleanup_cron.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: neutron-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: libmodules\n          hostPath:\n            path: /lib/modules\n        - name: iptables-lockfile\n          hostPath:\n            path: /run/xtables.lock\n            type: FileOrCreate\n        - name: socket\n          hostPath:\n            path: /var/lib/neutron/openstack-helm\n        {{- if .Values.network.share_namespaces }}\n        - name: host-run-netns\n          hostPath:\n            path: /run/netns\n        {{- end }}\n{{ if $mounts_neutron_netns_cleanup_cron.volumes }}{{ toYaml $mounts_neutron_netns_cleanup_cron.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_netns_cleanup_cron }}\n{{- $envAll := . 
}}\n{{- $daemonset := \"netns-cleanup-cron\" }}\n{{- $configMapName := \"neutron-etc\" }}\n{{- $serviceAccountName := \"neutron-netns-cleanup-cron\" }}\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"netns_cleanup_cron\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"neutron.netns_cleanup_cron.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"neutron.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/daemonset-neutron-ovn-vpn-agent.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ovnVPNAgentReadinessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/neutron/neutron.conf\n    - --config-file\n    - /etc/neutron/neutron_vpnaas.conf\n    - --config-file\n    - /etc/neutron/neutron_ovn_vpn_agent.ini\n{{- if .Values.pod.use_fqdn.neutron_agent }}\n    - --use-fqdn\n{{- end }}\n{{- end }}\n{{- define \"ovnVPNAgentLivenessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/neutron/neutron.conf\n    - --config-file\n    - /etc/neutron/neutron_vpnaas.conf\n    - --config-file\n    - /etc/neutron/neutron_ovn_vpn_agent.ini\n    - --liveness-probe\n{{- if .Values.pod.use_fqdn.neutron_agent }}\n    - --use-fqdn\n{{- end }}\n{{- end }}\n\n{{- define \"neutron.ovn_vpn_agent.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 
3 }}\n{{- with $envAll }}\n\n{{- $mounts_ovn_vpn_agent := .Values.pod.mounts.ovn_vpn_agent.ovn_vpn_agent }}\n{{- $mounts_ovn_vpn_agent_init := .Values.pod.mounts.ovn_vpn_agent.init_container }}\n{{- $etcSources := .Values.pod.etcSources.ovn_vpn_agent }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: neutron-ovn-vpn-agent\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"ovn-vpn-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"ovn-vpn-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"ovn_vpn_agent\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"ovn-vpn-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"ovn_vpn_agent\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"ovn_vpn_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"ovn_vpn_agent\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.agent.ovn_vpn.node_selector_key }}: {{ .Values.labels.agent.ovn_vpn.node_selector_value }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n      shareProcessNamespace: true\n      {{- else }}\n      hostPID: true\n      {{- end }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_ovn_vpn_agent_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: ovn-vpn-agent-init\n{{ tuple $envAll \"neutron_ovn_vpn\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.ovn_vpn | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"ovn_vpn_agent\" \"container\" \"ovn_vpn_agent_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: NEUTRON_USER_UID\n              value: \"{{ .Values.pod.security_context.ovn_vpn_agent.pod.runAsUser }}\"\n          command:\n            - /tmp/neutron-ovn-vpn-agent-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: neutron-bin\n              mountPath: /tmp/neutron-ovn-vpn-agent-init.sh\n              subPath: neutron-ovn-vpn-agent-init.sh\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: 
neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            - name: socket\n              mountPath: /var/lib/neutron/openstack-helm\n        - name: ovn-neutron-init\n{{ tuple $envAll \"neutron_ovn_vpn\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.ovn_vpn | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"ovn_vpn_agent\" \"container\" \"ovn_vpn_agent_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/neutron-ovn-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: neutron-bin\n              mountPath: /tmp/neutron-ovn-init.sh\n              subPath: neutron-ovn-init.sh\n              readOnly: true\n      containers:\n        - name: neutron-ovn-vpn-agent\n{{ tuple $envAll \"neutron_ovn_vpn\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.ovn_vpn | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: RPC_PROBE_TIMEOUT\n              value: \"{{ .Values.pod.probes.rpc_timeout }}\"\n            - name: RPC_PROBE_RETRIES\n              value: \"{{ .Values.pod.probes.rpc_retries }}\"\n{{ dict \"envAll\" $envAll \"component\" \"ovn_vpn_agent\" \"container\" \"ovn_vpn_agent\" \"type\" \"readiness\" \"probeTemplate\" (include \"ovnVPNAgentReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"ovn_vpn_agent\" \"container\" \"ovn_vpn_agent\" \"type\" \"liveness\" \"probeTemplate\" (include \"ovnVPNAgentLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          
securityContext:\n            privileged: true\n          command:\n            - /tmp/neutron-ovn-vpn-agent.sh\n          volumeMounts:\n            - name: run\n              mountPath: /run\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: pod-var-neutron\n              mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n            - name: neutron-bin\n              mountPath: /tmp/neutron-ovn-vpn-agent.sh\n              subPath: neutron-ovn-vpn-agent.sh\n              readOnly: true\n            - name: neutron-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            {{- if ( has \"openvswitch\" .Values.network.backend ) }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/openvswitch_agent.ini\n              subPath: openvswitch_agent.ini\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron_vpnaas.conf\n              subPath: neutron_vpnaas.conf\n              readOnly: 
true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron_ovn_vpn_agent.ini\n              subPath: neutron_ovn_vpn_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"ovn_vpn_agent\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            - name: socket\n              mountPath: /var/lib/neutron/openstack-helm\n            {{- if .Values.network.share_namespaces }}\n            - name: host-run-netns\n              mountPath: /run/netns\n              mountPropagation: Bidirectional\n            {{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.compute_metadata.metadata.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_ovn_vpn_agent.volumeMounts }}{{ toYaml 
$mounts_ovn_vpn_agent.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - name: run\n          hostPath:\n            path: /run\n        - name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: neutron-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: socket\n          hostPath:\n            path: /var/lib/neutron/openstack-helm\n        {{- if .Values.network.share_namespaces }}\n        - name: host-run-netns\n          hostPath:\n            path: /run/netns\n        {{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.compute_metadata.metadata.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_ovn_vpn_agent.volumes }}{{ toYaml $mounts_ovn_vpn_agent.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_ovn_vpn_agent }}\n{{- $envAll := . 
}}\n{{- $daemonset := \"ovn-vpn-agent\" }}\n{{- $configMapName := \"neutron-etc\" }}\n{{- $serviceAccountName := \"neutron-ovn-vpn-agent\" }}\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"ovn_vpn_agent\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"neutron.ovn_vpn_agent.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"neutron.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/daemonset-ovn-metadata-agent.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ovnMetadataAgentReadinessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/neutron/neutron.conf\n    - --config-file\n    - /etc/neutron/ovn_metadata_agent.ini\n{{- if .Values.pod.use_fqdn.neutron_agent }}\n    - --use-fqdn\n{{- end }}\n{{- end }}\n{{- define \"ovnMetadataAgentLivenessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/neutron/neutron.conf\n    - --config-file\n    - /etc/neutron/ovn_metadata_agent.ini\n    - --liveness-probe\n{{- if .Values.pod.use_fqdn.neutron_agent }}\n    - --use-fqdn\n{{- end }}\n{{- end }}\n\n{{- define \"neutron.ovn_metadata_agent.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 
3 }}\n{{- with $envAll }}\n\n{{- $mounts_neutron_ovn_metadata_agent := .Values.pod.mounts.neutron_ovn_metadata_agent.neutron_ovn_metadata_agent }}\n{{- $mounts_neutron_ovn_metadata_agent_init := .Values.pod.mounts.neutron_ovn_metadata_agent.init_container }}\n{{- $etcSources := .Values.pod.etcSources.neutron_ovn_metadata_agent }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: neutron-ovn-metadata-agent\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"ovn-metadata-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"ovn-metadata-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"ovn_metadata_agent\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"ovn-metadata-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"neutron_ovn_metadata_agent\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"neutron-ovn-metadata-agent-default\" \"containerNames\" (list \"neutron-ovn-metadata-agent\" \"neutron-metadata-agent-init\" \"ovn-neutron-init\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"neutron_ovn_metadata_agent\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"neutron_ovn_metadata_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"neutron_ovn_metadata_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.ovs.node_selector_key }}: {{ .Values.labels.ovs.node_selector_value }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n      shareProcessNamespace: true\n      {{- else }}\n      hostPID: true\n      {{- end }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_neutron_ovn_metadata_agent_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: neutron-metadata-agent-init\n{{ tuple $envAll \"neutron_metadata\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.ovn_metadata | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_metadata_agent\" \"container\" \"neutron_metadata_agent_init\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: NEUTRON_USER_UID\n              value: \"{{ .Values.pod.security_context.neutron_metadata_agent.pod.runAsUser }}\"\n          command:\n            - /tmp/neutron-metadata-agent-init.sh\n          volumeMounts:\n            - name: run-openvswitch\n              mountPath: /run/openvswitch\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: neutron-bin\n              mountPath: /tmp/neutron-metadata-agent-init.sh\n              subPath: neutron-metadata-agent-init.sh\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            - name: socket\n              mountPath: /var/lib/neutron/openstack-helm\n        - name: ovn-neutron-init\n{{ tuple $envAll \"neutron_metadata\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.ovn_metadata | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_metadata_agent\" \"container\" \"neutron_metadata_agent_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/neutron-ovn-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: neutron-bin\n              mountPath: /tmp/neutron-ovn-init.sh\n              subPath: neutron-ovn-init.sh\n              readOnly: true\n      containers:\n        - name: neutron-ovn-metadata-agent\n{{ tuple $envAll \"neutron_metadata\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll 
$envAll.Values.pod.resources.agent.ovn_metadata | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: RPC_PROBE_TIMEOUT\n              value: \"{{ .Values.pod.probes.rpc_timeout }}\"\n            - name: RPC_PROBE_RETRIES\n              value: \"{{ .Values.pod.probes.rpc_retries }}\"\n{{ dict \"envAll\" $envAll \"component\" \"ovn_metadata_agent\" \"container\" \"ovn_metadata_agent\" \"type\" \"readiness\" \"probeTemplate\" (include \"ovnMetadataAgentReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"ovn_metadata_agent\" \"container\" \"ovn_metadata_agent\" \"type\" \"liveness\" \"probeTemplate\" (include \"ovnMetadataAgentLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          securityContext:\n            privileged: true\n          command:\n            - /tmp/neutron-ovn-metadata-agent.sh\n          volumeMounts:\n            - name: run-openvswitch\n              mountPath: /run/openvswitch\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: pod-var-neutron\n              mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n            - name: neutron-bin\n              mountPath: /tmp/neutron-ovn-metadata-agent.sh\n              subPath: neutron-ovn-metadata-agent.sh\n              readOnly: true\n            - name: neutron-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: 
/etc/neutron/neutron.conf.d/\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            {{- if ( has \"openvswitch\" .Values.network.backend ) }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/openvswitch_agent.ini\n              subPath: openvswitch_agent.ini\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/ovn_metadata_agent.ini\n              subPath: ovn_metadata_agent.ini\n              readOnly: true\n            - name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"ovn_metadata_agent\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n      
      {{- end }}\n            - name: socket\n              mountPath: /var/lib/neutron/openstack-helm\n            {{- if .Values.network.share_namespaces }}\n            - name: host-run-netns\n              mountPath: /run/netns\n              mountPropagation: Bidirectional\n            {{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.compute_metadata.metadata.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_neutron_ovn_metadata_agent.volumeMounts }}{{ toYaml $mounts_neutron_ovn_metadata_agent.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - name: run-openvswitch\n          hostPath:\n            path: /run/openvswitch\n        - name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: neutron-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: socket\n          hostPath:\n            path: /var/lib/neutron/openstack-helm\n        {{- if .Values.network.share_namespaces }}\n        - name: host-run-netns\n          hostPath:\n            path: /run/netns\n        {{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.compute_metadata.metadata.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- 
dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_neutron_ovn_metadata_agent.volumes }}{{ toYaml $mounts_neutron_ovn_metadata_agent.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_ovn_metadata_agent }}\n{{- $envAll := . }}\n{{- $daemonset := \"ovn-metadata-agent\" }}\n{{- $configMapName := \"neutron-etc\" }}\n{{- $serviceAccountName := \"neutron-ovn-metadata-agent\" }}\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"ovn_metadata\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"neutron.ovn_metadata_agent.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"neutron.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/daemonset-ovs-agent.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ovsAgentReadinessProbeTemplate\" }}\nexec:\n  command:\n    - /tmp/neutron-openvswitch-agent-readiness.sh\n{{- end }}\n{{- define \"ovsAgentLivenessProbeTemplate\" }}\nexec:\n  command:\n    - /tmp/neutron-openvswitch-agent-liveness.sh\n{{- end }}\n\n{{- define \"neutron.ovs_agent.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 
3 }}\n{{- with $envAll }}\n\n{{- $mounts_neutron_ovs_agent := .Values.pod.mounts.neutron_ovs_agent.neutron_ovs_agent }}\n{{- $mounts_neutron_ovs_agent_init := .Values.pod.mounts.neutron_ovs_agent.init_container }}\n{{- $etcSources := .Values.pod.etcSources.neutron_ovs_agent }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: neutron-ovs-agent\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"neutron-ovs-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"neutron-ovs-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"ovs_agent\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"neutron-ovs-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"neutron_ovs_agent\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"neutron-ovs-agent-default\" \"containerNames\" (list \"neutron-ovs-agent\" \"init\" \"neutron-openvswitch-agent-kernel-modules\" \"neutron-ovs-agent-init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"neutron_ovs_agent\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"neutron_ovs_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"neutron_ovs_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.ovs.node_selector_key }}: {{ .Values.labels.ovs.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n      shareProcessNamespace: true\n      {{- else }}\n      hostPID: true\n      {{- end }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_neutron_ovs_agent_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: neutron-openvswitch-agent-kernel-modules\n{{ tuple $envAll \"neutron_openvswitch_agent\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_ovs_agent\" \"container\" \"neutron_openvswitch_agent_kernel_modules\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/neutron-openvswitch-agent-init-modules.sh\n    
      volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: neutron-bin\n              mountPath: /tmp/neutron-openvswitch-agent-init-modules.sh\n              subPath: neutron-openvswitch-agent-init-modules.sh\n              readOnly: true\n            - name: host-rootfs\n              mountPath: /mnt/host-rootfs\n              mountPropagation: HostToContainer\n              readOnly: true\n{{- if .Values.conf.ovs_dpdk.enabled }}\n            - name: pci-devices\n              mountPath: /sys/bus/pci/devices\n{{- end }}\n{{- if .Values.conf.netoffload.enabled }}\n        - name: netoffload\n{{ tuple $envAll \"netoffload\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_ovs_agent\" \"container\" \"netoffload\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/neutron-openvswitch-agent-init-netoffload.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: neutron-bin\n              mountPath: /tmp/neutron-openvswitch-agent-init-netoffload.sh\n              subPath: neutron-openvswitch-agent-init-netoffload.sh\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /tmp/netoffload\n              subPath: netoffload\n              readOnly: true\n            - name: run\n              mountPath: /run\n{{- end }}\n        - name: neutron-ovs-agent-init\n{{ tuple $envAll \"neutron_openvswitch_agent\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.ovs | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_ovs_agent\" \"container\" \"neutron_ovs_agent_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          {{- if 
.Values.conf.ovs_dpdk.enabled }}\n          env:\n            - name: UPDATE_DPDK_BOND_CONFIG\n              valueFrom:\n                secretKeyRef:\n                  name: {{ $configMapName }}\n                  key: update_dpdk_bond_config\n          {{- end }}\n          command:\n            - /tmp/neutron-openvswitch-agent-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: neutron-bin\n              mountPath: /tmp/neutron-openvswitch-agent-init.sh\n              subPath: neutron-openvswitch-agent-init.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/openvswitch_agent.ini\n              subPath: openvswitch_agent.ini\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            {{- if .Values.conf.plugins.taas.taas.enabled }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/taas.ini\n              subPath: taas.ini\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom 
sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /tmp/auto_bridge_add\n              subPath: auto_bridge_add\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- if .Values.conf.ovs_dpdk.enabled }}\n            - name: neutron-etc\n              mountPath: /tmp/dpdk.conf\n              subPath: dpdk.conf\n              readOnly: true\n            {{- end }}\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"ovs_agent\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            - name: run\n              mountPath: /run\n{{ if $mounts_neutron_ovs_agent.volumeMounts }}{{ toYaml $mounts_neutron_ovs_agent.volumeMounts | indent 12 }}{{ end }}\n      containers:\n        - name: neutron-ovs-agent\n{{ tuple $envAll \"neutron_openvswitch_agent\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.ovs | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: RPC_PROBE_TIMEOUT\n              value: \"{{ .Values.pod.probes.rpc_timeout }}\"\n            - name: RPC_PROBE_RETRIES\n              value: \"{{ .Values.pod.probes.rpc_retries }}\"\n{{ dict \"envAll\" $envAll \"component\" \"ovs_agent\" 
\"container\" \"ovs_agent\" \"type\" \"readiness\" \"probeTemplate\" (include \"ovsAgentReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"ovs_agent\" \"container\" \"ovs_agent\" \"type\" \"liveness\" \"probeTemplate\" (include \"ovsAgentLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_ovs_agent\" \"container\" \"neutron_ovs_agent\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/neutron-openvswitch-agent.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: pod-var-neutron\n              mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n            - name: neutron-bin\n              mountPath: /tmp/neutron-openvswitch-agent.sh\n              subPath: neutron-openvswitch-agent.sh\n              readOnly: true\n            - name: neutron-bin\n              mountPath: /tmp/neutron-openvswitch-agent-readiness.sh\n              subPath: neutron-openvswitch-agent-readiness.sh\n              readOnly: true\n            - name: neutron-bin\n              mountPath: /tmp/neutron-openvswitch-agent-liveness.sh\n              subPath: neutron-openvswitch-agent-liveness.sh\n              readOnly: true\n            - name: neutron-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: 
neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/openvswitch_agent.ini\n              subPath: openvswitch_agent.ini\n              readOnly: true\n            {{- if .Values.conf.plugins.taas.taas.enabled }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/taas.ini\n              subPath: taas.ini\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"ovs_agent\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- 
end }}\n            {{- end }}\n            - name: run\n              mountPath: /run\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_neutron_ovs_agent.volumeMounts }}{{ toYaml $mounts_neutron_ovs_agent.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - name: varlibopenvswitch\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n        - name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: neutron-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: run\n          hostPath:\n            path: /run\n        - name: host-rootfs\n          hostPath:\n            path: /\n{{- if .Values.conf.ovs_dpdk.enabled }}\n        - name: pci-devices\n          hostPath:\n            path: /sys/bus/pci/devices\n            type: Directory\n{{- end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_neutron_ovs_agent.volumes }}{{ toYaml $mounts_neutron_ovs_agent.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if and .Values.manifests.daemonset_ovs_agent ( has \"openvswitch\" .Values.network.backend ) }}\n{{- $envAll := . 
}}\n{{- $daemonset := \"ovs-agent\" }}\n{{- $configMapName := \"neutron-etc\" }}\n{{- $serviceAccountName := \"neutron-ovs-agent\" }}\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"ovs_agent\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"neutron.ovs_agent.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"neutron.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/daemonset-sriov-agent.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"sriovAgentReadinessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/neutron/neutron.conf\n    - --config-file\n    - /etc/neutron/sriov_agent.ini\n{{- if .Values.pod.use_fqdn.neutron_agent }}\n    - --use-fqdn\n{{- end }}\n{{- end }}\n\n{{- define \"neutron.sriov_agent.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 
3 }}\n{{- with $envAll }}\n\n{{- $mounts_neutron_sriov_agent := .Values.pod.mounts.neutron_sriov_agent.neutron_sriov_agent }}\n{{- $mounts_neutron_sriov_agent_init := .Values.pod.mounts.neutron_sriov_agent.init_container }}\n{{- $etcSources := .Values.pod.etcSources.neutron_sriov_agent }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: neutron-sriov-agent\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"neutron-sriov-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"neutron-sriov-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"sriov_agent\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"neutron-sriov-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"neutron_sriov_agent\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"neutron-sriov-agent-default\" \"containerNames\" (list \"neutron-sriov-agent-init\" \"init\" \"neutron-sriov-agent\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"neutron_sriov_agent\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"neutron_sriov_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"neutron_sriov_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.sriov.node_selector_key }}: {{ .Values.labels.sriov.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n      shareProcessNamespace: true\n      {{- else }}\n      hostPID: true\n      {{- end }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_neutron_sriov_agent_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: neutron-sriov-agent-init\n{{ tuple $envAll \"neutron_sriov_agent_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.sriov | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_sriov_agent\" \"container\" \"neutron_sriov_agent_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n  
        command:\n            - /tmp/neutron-sriov-agent-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: neutron-bin\n              mountPath: /tmp/neutron-sriov-agent-init.sh\n              subPath: neutron-sriov-agent-init.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/sriov_agent.ini\n              subPath: sriov_agent.ini\n              readOnly: true\n            {{- if .Values.conf.plugins.taas.taas.enabled }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/taas.ini\n              subPath: taas.ini\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"sriov_agent\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- 
$rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            - name: run\n              mountPath: /run\n{{ if $mounts_neutron_sriov_agent.volumeMounts }}{{ toYaml $mounts_neutron_sriov_agent.volumeMounts | indent 12 }}{{ end }}\n      containers:\n        - name: neutron-sriov-agent\n{{ tuple $envAll \"neutron_sriov_agent\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.agent.sriov | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_sriov_agent\" \"container\" \"neutron_sriov_agent\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: RPC_PROBE_TIMEOUT\n              value: \"{{ .Values.pod.probes.rpc_timeout }}\"\n            - name: RPC_PROBE_RETRIES\n              value: \"{{ .Values.pod.probes.rpc_retries }}\"\n{{ dict \"envAll\" $envAll \"component\" \"sriov_agent\" \"container\" \"sriov_agent\" \"type\" \"readiness\" \"probeTemplate\" (include \"sriovAgentReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/neutron-sriov-agent.sh\n          volumeMounts:\n            - mountPath: /sys/class/net\n              name: host-sys-class-net\n            - mountPath: /sys/devices\n              name: host-sys-devices\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: pod-var-neutron\n              mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n            - name: neutron-bin\n      
        mountPath: /tmp/neutron-sriov-agent.sh\n              subPath: neutron-sriov-agent.sh\n              readOnly: true\n            - name: neutron-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/sriov_agent.ini\n              subPath: sriov_agent.ini\n              readOnly: true\n            {{- if .Values.conf.plugins.taas.taas.enabled }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/taas.ini\n              subPath: taas.ini\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_neutron_sudoers\n              subPath: neutron_sudoers\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/rootwrap.conf\n              subPath: 
rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"sriov_agent\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/neutron/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: neutron-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            - name: run\n              mountPath: /run\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_neutron_sriov_agent.volumeMounts }}{{ toYaml $mounts_neutron_sriov_agent.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: host-sys-class-net\n          hostPath:\n            path: /sys/class/net\n        - name: host-sys-devices\n          hostPath:\n            path: /sys/devices\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n        - name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: neutron-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: run\n          hostPath:\n            path: /run\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" 
$envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_neutron_sriov_agent.volumes }}{{ toYaml $mounts_neutron_sriov_agent.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if and .Values.manifests.daemonset_sriov_agent ( has \"sriov\" .Values.network.backend ) }}\n{{- $envAll := . }}\n{{- $daemonset := \"sriov-agent\" }}\n{{- $configMapName := \"neutron-etc\" }}\n{{- $serviceAccountName := \"neutron-sriov-agent\" }}\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"sriov_agent\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"neutron.sriov_agent.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"neutron.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/deployment-ironic-agent.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_ironic_agent }}\n{{- $envAll := . }}\n\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"ironic_agent\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n\n{{- $mounts_neutron_ironic_agent := .Values.pod.mounts.neutron_ironic_agent.neutron_ironic_agent }}\n{{- $mounts_neutron_ironic_agent_init := .Values.pod.mounts.neutron_ironic_agent.init_container }}\n{{- $etcSources := .Values.pod.etcSources.neutron_ironic_agent }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"neutron-ironic-agent\" }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: neutron-ironic-agent\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"ironic-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.ironic_agent }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"ironic-agent\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"ironic-agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"neutron_ironic_agent\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"neutron_ironic_agent\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"neutron_ironic_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"neutron_ironic_agent\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"neutron\" \"ironic-agent\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.ironic_agent.node_selector_key }}: {{ .Values.labels.ironic_agent.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.ironic_agent.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_neutron_ironic_agent_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: neutron-ironic-agent-init\n{{ tuple $envAll \"neutron_ironic_agent_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.ironic_agent | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_ironic_agent\" \"container\" \"neutron_ironic_agent_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/neutron-ironic-agent-init.sh\n          volumeMounts:\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: neutron-bin\n              mountPath: /tmp/neutron-ironic-agent-init.sh\n              subPath: neutron-ironic-agent-init.sh\n              readOnly: true\n      containers:\n        - name: neutron-ironic-agent\n{{ tuple $envAll \"neutron_ironic_agent\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.ironic_agent | include \"helm-toolkit.snippets.kubernetes_resources\" | 
indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_ironic_agent\" \"container\" \"neutron_ironic_agent\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/neutron-ironic-agent.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/neutron-ironic-agent.sh\n                  - stop\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: pod-var-neutron\n              mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: neutron-bin\n              mountPath: /tmp/neutron-ironic-agent.sh\n              subPath: neutron-ironic-agent.sh\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" 
| indent 12 }}\n{{ if $mounts_neutron_ironic_agent.volumeMounts }}{{ toYaml $mounts_neutron_ironic_agent.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n        - name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: neutron-etc\n            defaultMode: 0444\n        - name: neutron-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_neutron_ironic_agent.volumes }}{{ toYaml $mounts_neutron_ironic_agent.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/deployment-rpc_server.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_rpc_server }}\n{{- $envAll := . }}\n\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"server\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n\n{{- $mounts_neutron_rpc_server := .Values.pod.mounts.neutron_rpc_server.neutron_rpc_server }}\n{{- $mounts_neutron_rpc_server_init := .Values.pod.mounts.neutron_rpc_server.init_container }}\n{{- $etcSources := .Values.pod.etcSources.neutron_rpc_server }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"neutron-rpc-server\" }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: neutron-rpc-server\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"rpc_server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.rpc_server }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"rpc_server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ 
tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"rpc_server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"neutron-rpc-server\" \"containerNames\" (list \"neutron-rpc-server\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"neutron_rpc_server\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"neutron_rpc_server\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"neutron_rpc_server\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"neutron\" \"rpc_server\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.rpc_server.node_selector_key }}: {{ .Values.labels.rpc_server.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.rpc_server.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_neutron_rpc_server_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        {{- if ( has \"ovn\" .Values.network.backend ) }}\n        - name: ovn-neutron-init\n{{ tuple $envAll \"neutron_rpc_server\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          command:\n            - /tmp/neutron-ovn-init.sh\n          volumeMounts:\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: neutron-bin\n              mountPath: /tmp/neutron-ovn-init.sh\n              subPath: neutron-ovn-init.sh\n              readOnly: true\n        {{- end }}\n      containers:\n        - name: neutron-rpc-server\n{{ tuple $envAll \"neutron_rpc_server\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.rpc_server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_rpc_server\" \"container\" \"neutron_rpc_server\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/neutron-rpc-server.sh\n            - start\n{{- if or 
.Values.manifests.certificates .Values.tls.identity }}\n          env:\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/neutron/certs/ca.crt\"\n{{- end }}\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/neutron-rpc-server.sh\n                  - stop\n          ports:\n            - name: q-api\n              containerPort: {{ tuple \"network\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: pod-var-neutron\n              mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n            - name: neutron-bin\n              mountPath: /tmp/neutron-rpc-server.sh\n              subPath: neutron-rpc-server.sh\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron-api-uwsgi.ini\n              subPath: neutron-api-uwsgi.ini\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/api_audit_map.conf\n              subPath: api_audit_map.conf\n              readOnly: true\n   
         - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            {{ if ( has \"sriov\" .Values.network.backend ) }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/sriov_agent.ini\n              subPath: sriov_agent.ini\n              readOnly: true\n            {{ end }}\n            {{- if .Values.conf.plugins.taas.taas.enabled }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/taas_plugin.ini\n              subPath: taas_plugin.ini\n              readOnly: true\n            {{ end }}\n            {{- if .Values.conf.plugins.l2gateway }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/l2gw_plugin.ini\n              subPath: l2gw_plugin.ini\n              readOnly: true\n            {{ end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.network.server.internal \"path\" \"/etc/neutron/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_neutron_rpc_server.volumeMounts }}{{ toYaml $mounts_neutron_rpc_server.volumeMounts | indent 12 
}}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n        {{- if .Values.manifests.certificates }}\n        - name: wsgi-neutron\n          emptyDir: {}\n        {{- end }}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: neutron-etc\n            defaultMode: 0444\n        - name: neutron-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.network.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_neutron_rpc_server.volumes }}{{ toYaml $mounts_neutron_rpc_server.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/deployment-server.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"serverReadinessProbeTemplate\" }}\n{{- if .Values.manifests.certificates }}\nexec:\n  command:\n    - python\n    - -c\n    - \"import requests; r = requests.get('http://127.0.0.1:{{ tuple \"network\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}'); r.raise_for_status()\"\ninitialDelaySeconds: 30\n{{- else }}\nhttpGet:\n  scheme: {{ tuple \"network\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: /\n  port: {{ tuple \"network\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n{{- end }}\n{{- define \"serverLivenessProbeTemplate\" }}\n{{- if .Values.manifests.certificates }}\nexec:\n  command:\n    - python\n    - -c\n    - \"import requests; r = requests.get('http://127.0.0.1:{{ tuple \"network\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}'); r.raise_for_status()\"\ninitialDelaySeconds: 30\n{{- else }}\nhttpGet:\n  scheme: {{ tuple \"network\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: /\n  {{- if .Values.pod.probes.server.server.liveness.port }}\n  port: {{ .Values.pod.probes.server.server.liveness.port }}\n  {{ else }}\n  port: {{ tuple \"network\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  {{- end}}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.deployment_server }}\n{{- $envAll := . }}\n\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"server\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n\n{{- $mounts_neutron_server := .Values.pod.mounts.neutron_server.neutron_server }}\n{{- $mounts_neutron_server_init := .Values.pod.mounts.neutron_server.init_container }}\n{{- $etcSources := .Values.pod.etcSources.neutron_server }}\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"neutron-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"neutron-server\" }}\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: neutron-server\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.server }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"neutron\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"neutron_server\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"neutron-server\" \"containerNames\" (list \"neutron-server\" \"init\" \"nginx\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"neutron_server\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n{{ tuple \"neutron_server\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"neutron_server\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"neutron\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.server.node_selector_key }}: {{ .Values.labels.server.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.server.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_neutron_server_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        {{- if ( has \"ovn\" .Values.network.backend ) }}\n        - name: ovn-neutron-init\n{{ tuple $envAll \"neutron_server\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          command:\n            - /tmp/neutron-ovn-init.sh\n          volumeMounts:\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: 
neutron-bin\n              mountPath: /tmp/neutron-ovn-init.sh\n              subPath: neutron-ovn-init.sh\n              readOnly: true\n        {{- end }}\n      containers:\n        {{- if $envAll.Values.manifests.certificates }}\n        - name: nginx\n{{ tuple $envAll \"nginx\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.nginx | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_server\" \"container\" \"nginx\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          ports:\n            - name: q-api\n              containerPort: {{ tuple \"network\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          env:\n            - name: PORT\n              value: {{ tuple \"network\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: POD_IP\n              valueFrom:\n                fieldRef:\n                  fieldPath: status.podIP\n            - name: SHORTNAME\n              value: {{ tuple \"network\" \"internal\" .  | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" | quote }}\n          readinessProbe:\n            httpGet:\n              scheme: HTTPS\n              path: /\n              port: {{ tuple \"network\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          command:\n            - /tmp/nginx.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/nginx.sh\n                  - stop\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.neutron.oslo_concurrency.lock_path }}\n            - name: neutron-bin\n              mountPath: /tmp/nginx.sh\n              subPath: nginx.sh\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/nginx/nginx.conf\n              subPath: nginx.conf\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.network.server.internal \"path\" \"/etc/nginx/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n        {{- end }}\n        - name: neutron-server\n{{ tuple $envAll \"neutron_server\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"neutron_server\" \"container\" \"neutron_server\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"server\" \"container\" \"server\" \"type\" \"readiness\" \"probeTemplate\" (include \"serverReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"server\" \"container\" \"server\" \"type\" \"liveness\" \"probeTemplate\" (include \"serverLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/neutron-server.sh\n            - 
start\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n          env:\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/neutron/certs/ca.crt\"\n{{- end }}\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/neutron-server.sh\n                  - stop\n{{- if not $envAll.Values.manifests.certificates }}\n          ports:\n            - name: q-api\n              containerPort: {{ tuple \"network\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: pod-var-neutron\n              mountPath: {{ .Values.conf.neutron.DEFAULT.state_path }}\n            - name: neutron-bin\n              mountPath: /tmp/neutron-server.sh\n              subPath: neutron-server.sh\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: neutron-etc-snippets\n              mountPath: /etc/neutron/neutron.conf.d/\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron-api-uwsgi.ini\n              subPath: neutron-api-uwsgi.ini\n              readOnly: true\n            {{- if .Values.conf.neutron.DEFAULT.log_config_append }}\n            - name: neutron-etc\n              mountPath: {{ .Values.conf.neutron.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.neutron.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/api_audit_map.conf\n              subPath: api_audit_map.conf\n              readOnly: true\n            - name: neutron-etc\n              
mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n              subPath: ml2_conf.ini\n              readOnly: true\n            {{ if ( has \"sriov\" .Values.network.backend ) }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/plugins/ml2/sriov_agent.ini\n              subPath: sriov_agent.ini\n              readOnly: true\n            {{ end }}\n            {{- if .Values.conf.plugins.taas.taas.enabled }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/taas_plugin.ini\n              subPath: taas_plugin.ini\n              readOnly: true\n            {{ end }}\n            {{- if .Values.conf.plugins.l2gateway }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/l2gw_plugin.ini\n              subPath: l2gw_plugin.ini\n              readOnly: true\n            {{ end }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: neutron-etc\n              mountPath: /etc/neutron/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            {{- if contains \"vpnaas\" .Values.conf.neutron.DEFAULT.service_plugins }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron_vpnaas.conf\n              subPath: neutron_vpnaas.conf\n              readOnly: true\n            {{- end }}\n            {{- if contains \"ovn-vpnaas\" .Values.conf.neutron.DEFAULT.service_plugins }}\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron_ovn_vpn_agent.ini\n              subPath: neutron_ovn_vpn_agent.ini\n              readOnly: true\n            {{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or 
.Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.network.server.internal \"path\" \"/etc/neutron/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_neutron_server.volumeMounts }}{{ toYaml $mounts_neutron_server.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n        {{- if .Values.manifests.certificates }}\n        - name: wsgi-neutron\n          emptyDir: {}\n        {{- end }}\n        - name: pod-var-neutron\n          emptyDir: {}\n        - name: neutron-bin\n          configMap:\n            name: neutron-bin\n            defaultMode: 0555\n        - name: neutron-etc\n          secret:\n            secretName: neutron-etc\n            defaultMode: 0444\n        - name: neutron-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.network.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_neutron_server.volumes }}{{ toYaml $mounts_neutron_server.volumes | indent 
8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "neutron/templates/ingress-server.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_server .Values.network.server.ingress.public }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"server\" \"backendServiceType\" \"network\" \"backendPort\" \"q-api\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.network.server.internal -}}\n{{- if and .Values.manifests.certificates $secretName }}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.network.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end }}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.bootstrap\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"5\"\n{{- end }}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"neutron\" \"keystoneUser\" .Values.bootstrap.ks_user \"logConfigFile\" .Values.conf.neutron.DEFAULT.log_config_append  -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $bootstrapJob \"tlsSecret\" .Values.secrets.tls.network.server.internal -}}\n{{- end -}}\n{{- $_ := set $bootstrapJob \"jobAnnotations\" (include \"metadata.annotations.job.bootstrap\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.neutron.enabled -}}\n{{- $_ := set $bootstrapJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/job-db-drop.yaml",
    "content": "\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"neutron\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbDropJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.neutron.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"neutron\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.neutron.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"neutron\" \"podVolMounts\" .Values.pod.mounts.neutron_db_sync.neutron_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.neutron_db_sync.neutron_db_sync.volumes -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbSyncJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.neutron.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"neutron\" -}}\n{{- $_ := set $imageRepoSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.neutron.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksEndpointsJob := dict \"envAll\" . \"serviceName\" \"neutron\" \"serviceTypes\" ( tuple \"network\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksEndpointsJob \"tlsSecret\" .Values.secrets.tls.network.server.internal -}}\n{{- end -}}\n{{- $_ := set $ksEndpointsJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.neutron.enabled -}}\n{{- $_ := set $ksEndpointsJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksEndpointsJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"neutron\" \"serviceTypes\" ( tuple \"network\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.network.server.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.neutron.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $serviceUsers := (tuple \"neutron\" \"nova\" \"placement\") -}}\n{{- if eq (.Values.conf.neutron.DEFAULT.external_dns_driver | default \"\") \"designate\" -}}\n{{- $serviceUsers = append $serviceUsers \"designate\" -}}\n{{- end -}}\n{{- if (has \"baremetal\" .Values.network.backend) -}}\n{{- $serviceUsers = append $serviceUsers \"ironic\" -}}\n{{- end -}}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"neutron\" \"serviceUsers\" $serviceUsers -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.network.server.internal -}}\n{{- end -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.neutron.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"neutron\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $rmqUserJob \"tlsSecret\" .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $rmqUserJob \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.neutron.enabled -}}\n{{- $_ := set $rmqUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/network_policy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"neutron\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "neutron/templates/pdb-server.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_server }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: neutron-server\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.server.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"neutron\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/pod-rally-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.pod_rally_test }}\n{{- $envAll := . }}\n\n{{- $mounts_tests := .Values.pod.mounts.neutron_tests.neutron_tests }}\n{{- $mounts_tests_init := .Values.pod.mounts.neutron_tests.init_container }}\n\n{{- $serviceAccountName := print .deployment_name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ print .deployment_name \"-test\" }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"neutron\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{ dict \"envAll\" $envAll \"podName\" \"neutron-test\" \"containerNames\" (list \"init\" \"neutron-test\" \"neutron-test-ks-user\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.neutron.enabled }}\n{{ tuple $envAll \"neutron\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | 
indent 2 }}\n{{ end }}\n  restartPolicy: Never\n{{ tuple \"neutron_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 2 }}\n{{ tuple \"neutron_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n    - name: neutron-test-ks-user\n{{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      command:\n        - /tmp/ks-user.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: neutron-bin\n          mountPath: /tmp/ks-user.sh\n          subPath: ks-user.sh\n          readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.network.server.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 8 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_SERVICE_NAME\n          value: \"test\"\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_ROLE\n          value: {{ .Values.endpoints.identity.auth.test.role | quote }}\n  containers:\n    - name: neutron-test\n{{ tuple $envAll \"test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      env:\n{{- with $env := dict 
\"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: RALLY_ENV_NAME\n          value: {{.Release.Name}}\n      command:\n        - /tmp/rally-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: neutron-etc\n          mountPath: /etc/rally/rally_tests.yaml\n          subPath: rally_tests.yaml\n          readOnly: true\n        - name: neutron-bin\n          mountPath: /tmp/rally-test.sh\n          subPath: rally-test.sh\n          readOnly: true\n        - name: rally-db\n          mountPath: /var/lib/rally\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.network.server.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 8 }}\n{{ if $mounts_tests.volumeMounts }}{{ toYaml $mounts_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: neutron-etc\n      secret:\n        secretName: neutron-etc\n        defaultMode: 0444\n    - name: neutron-bin\n      configMap:\n        name: neutron-bin\n        defaultMode: 0555\n    - name: rally-db\n      emptyDir: {}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.network.server.internal | include \"helm-toolkit.snippets.tls_volume\"  | indent 4 }}\n{{ if $mounts_tests.volumes }}{{ toYaml $mounts_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"neutron\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendService\" \"server\" \"backendServiceType\" \"network\" ) }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $userClass, $val := $envAll.Values.endpoints.identity.auth }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/secret-ks-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $envAll := . -}}\n{{/* the endpoints.identity.auth sections with the oslo conf sections they get rendered to */}}\n{{- $ksUsers := dict\n  \"neutron\" \"keystone_authtoken\"\n  \"nova\" \"nova\"\n  \"placement\" \"placement\"\n-}}\n{{- if eq (.Values.conf.neutron.DEFAULT.external_dns_driver | default \"\") \"designate\" -}}\n{{- $_ := set $ksUsers \"designate\" \"designate\" -}}\n{{- end -}}\n{{- if (has \"baremetal\" .Values.network.backend) -}}\n{{- $_ := set $ksUsers \"ironic\" \"ironic\" -}}\n{{- end -}}\n{{ dict\n  \"envAll\" $envAll\n  \"serviceName\" \"neutron\"\n  \"serviceUserSections\" $ksUsers\n  | include \"helm-toolkit.manifests.secret_ks_etc\"\n}}\n{{- end }}"
  },
  {
    "path": "neutron/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- $rabbitmqProtocol := \"http\" }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- $rabbitmqProtocol = \"https\" }}\n{{- end }}\n{{- range $key1, $userClass := tuple \"admin\" \"neutron\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass $rabbitmqProtocol $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/service-ingress-neutron.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_server .Values.network.server.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"network\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "neutron/templates/service-server.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_server }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"network\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: q-api\n      port: {{ tuple \"network\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.server.node_port.enabled }}\n      nodePort: {{ .Values.network.server.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"neutron\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.server.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.server.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "neutron/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for neutron.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    neutron_db_sync: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    netoffload: ghcr.io/vexxhost/netoffload:v1.0.1\n    neutron_server: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_rpc_server: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_dhcp: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_metadata: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_ovn_metadata: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_ovn_db_sync: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_ovn_vpn: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_l3: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_l2gw: 
quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_openvswitch_agent: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_linuxbridge_agent: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_sriov_agent: quay.io/airshipit/neutron:stein-18.04-sriov\n    neutron_sriov_agent_init: quay.io/airshipit/neutron:stein-18.04-sriov\n    neutron_bagpipe_bgp: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_bgp_dragent: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_ironic_agent_init: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_ironic_agent: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    neutron_netns_cleanup_cron: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  agent:\n    dhcp:\n      node_selector_key: openstack-control-plane\n      node_selector_value: enabled\n    l3:\n      node_selector_key: openstack-control-plane\n      node_selector_value: enabled\n    metadata:\n      node_selector_key: openstack-control-plane\n      node_selector_value: enabled\n    l2gw:\n      node_selector_key: openstack-control-plane\n      node_selector_value: enabled\n    ovn_vpn:\n      node_selector_key: openstack-control-plane\n      node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  lb:\n    node_selector_key: linuxbridge\n    node_selector_value: enabled\n  # openvswitch is a special case, requiring a special\n  # label that can apply to both control hosts\n  # and compute hosts, until we get more sophisticated\n  # with our daemonset scheduling\n  ovs:\n    node_selector_key: openvswitch\n    node_selector_value: enabled\n  sriov:\n    node_selector_key: sriov\n    node_selector_value: enabled\n 
 bagpipe_bgp:\n    node_selector_key: openstack-compute-node\n    node_selector_value: enabled\n  bgp_dragent:\n    node_selector_key: openstack-compute-node\n    node_selector_value: enabled\n  server:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  rpc_server:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  ironic_agent:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  netns_cleanup_cron:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nnetwork:\n  # provide what type of network wiring will be used\n  backend:\n    - openvswitch\n  # NOTE(Portdirect): Share network namespaces with the host,\n  # allowing agents to be restarted without packet loss and simpler\n  # debugging. This feature requires mount propagation support.\n  share_namespaces: true\n  interface:\n    # Tunnel interface will be used for VXLAN tunneling.\n    tunnel: null\n    # If tunnel is null there is a fallback mechanism to search\n    # for interface with routing using tunnel network cidr.\n    tunnel_network_cidr: \"0/0\"\n  # To perform setup of network interfaces using the SR-IOV init\n  # container you can use a section similar to:\n  # sriov:\n  #   - device: ${DEV}\n  #     num_vfs: 8\n  #     mtu: 9214\n  #     promisc: false\n  #     qos:\n  #       - vf_num: 0\n  #         share: 10\n  #     queues_per_vf:\n  #       - num_queues: 16\n  #         exclude_vf: 0,11,21\n  server:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30096\n\nbootstrap:\n  enabled: false\n  ks_user: 
neutron\n  script: |\n    openstack token issue\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - neutron-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n    targeted:\n      sriov: {}\n      l2gateway: {}\n      bagpipe_bgp: {}\n      ovn:\n        server:\n          pod: null\n      bgp_dragent: {}\n      openvswitch:\n        dhcp:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: neutron\n                component: neutron-ovs-agent\n        l3:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: neutron\n                component: neutron-ovs-agent\n        metadata:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: neutron\n                component: neutron-ovs-agent\n      linuxbridge:\n        dhcp:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: neutron\n                component: neutron-lb-agent\n        l3:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: neutron\n                component: neutron-lb-agent\n        metadata:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: neutron\n                component: neutron-lb-agent\n        lb_agent:\n          pod: null\n  static:\n    bootstrap:\n      services:\n        - endpoint: internal\n          service: network\n        - endpoint: internal\n          service: compute\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - neutron-db-init\n      services:\n        - endpoint: internal\n          service: 
oslo_db\n    dhcp:\n      pod: null\n      jobs:\n        - neutron-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: network\n        - endpoint: internal\n          service: compute\n    ks_endpoints:\n      jobs:\n        - neutron-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n        - service: oslo_messaging\n          endpoint: internal\n    l3:\n      pod: null\n      jobs:\n        - neutron-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: network\n        - endpoint: internal\n          service: compute\n    lb_agent:\n      pod: null\n      jobs:\n        - neutron-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: network\n    metadata:\n      pod: null\n      jobs:\n        - neutron-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: network\n        - endpoint: internal\n          service: compute\n        - endpoint: internal\n          service: compute_metadata\n    ovn_metadata:\n      pod:\n        - requireSameNode: true\n          labels:\n            application: ovn\n            component: ovn-controller\n      services:\n        - endpoint: internal\n          service: compute_metadata\n        - endpoint: internal\n          service: network\n    ovn_vpn_agent:\n      pod:\n        - requireSameNode: true\n          labels:\n            application: ovn\n            component: ovn-controller\n      services:\n        - 
endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: network\n    ovs_agent:\n      jobs:\n        - neutron-rabbit-init\n      pod:\n        - requireSameNode: true\n          labels:\n            application: openvswitch\n            component: server\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: network\n    server:\n      jobs:\n        - neutron-db-sync\n        - neutron-ks-user\n        - neutron-ks-endpoints\n        - neutron-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_cache\n        - endpoint: internal\n          service: identity\n    rpc_server:\n      jobs:\n        - neutron-db-sync\n        - neutron-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_cache\n        - endpoint: internal\n          service: identity\n    ironic_agent:\n      jobs:\n        - neutron-db-sync\n        - neutron-ks-user\n        - neutron-ks-endpoints\n        - neutron-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_cache\n        - endpoint: internal\n          service: identity\n    tests:\n      services:\n        - endpoint: internal\n          service: network\n        - endpoint: internal\n          service: compute\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\npod:\n  use_fqdn:\n    neutron_agent: true\n  probes:\n    rpc_timeout: 60\n    rpc_retries: 2\n    dhcp_agent:\n      dhcp_agent:\n     
   readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 190\n            timeoutSeconds: 185\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 120\n            periodSeconds: 600\n            timeoutSeconds: 580\n    l3_agent:\n      l3_agent:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 190\n            timeoutSeconds: 185\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 120\n            periodSeconds: 600\n            timeoutSeconds: 580\n    lb_agent:\n      lb_agent:\n        readiness:\n          enabled: true\n    metadata_agent:\n      metadata_agent:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 190\n            timeoutSeconds: 185\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 120\n            periodSeconds: 600\n            timeoutSeconds: 580\n    ovn_vpn_agent:\n      ovn_vpn_agent:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 190\n            timeoutSeconds: 185\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 120\n            periodSeconds: 600\n            timeoutSeconds: 580\n    ovn_metadata_agent:\n      ovn_metadata_agent:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 190\n            timeoutSeconds: 185\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 120\n            periodSeconds: 600\n            timeoutSeconds: 580\n    ovs_agent:\n      ovs_agent:\n        readiness:\n          enabled: true\n          
params:\n            timeoutSeconds: 10\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 120\n            periodSeconds: 600\n            timeoutSeconds: 580\n    sriov_agent:\n      sriov_agent:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 190\n            timeoutSeconds: 185\n    bagpipe_bgp:\n      bagpipe_bgp:\n        readiness:\n          enabled: true\n          params:\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 60\n    bgp_dragent:\n      bgp_dragent:\n        readiness:\n          enabled: false\n          params:\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 60\n    l2gw_agent:\n      l2gw_agent:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 15\n            timeoutSeconds: 65\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 120\n            periodSeconds: 90\n            timeoutSeconds: 70\n    server:\n      server:\n        readiness:\n          enabled: true\n          params:\n            periodSeconds: 15\n            timeoutSeconds: 10\n        liveness:\n          enabled: true\n          port: 1717\n          params:\n            initialDelaySeconds: 60\n            periodSeconds: 15\n            timeoutSeconds: 10\n    rpc_server:\n      rpc_server:\n        readiness:\n          enabled: true\n          params:\n            periodSeconds: 15\n            timeoutSeconds: 10\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 60\n            periodSeconds: 15\n            timeoutSeconds: 10\n  security_context:\n    neutron_dhcp_agent:\n      pod:\n        runAsUser: 42424\n      container:\n        neutron_dhcp_agent:\n          
readOnlyRootFilesystem: true\n          privileged: true\n    neutron_l2gw_agent:\n      pod:\n        runAsUser: 42424\n      container:\n        neutron_l2gw_agent:\n          readOnlyRootFilesystem: true\n          privileged: true\n    neutron_bagpipe_bgp:\n      pod:\n        runAsUser: 42424\n      container:\n        neutron_bagpipe_bgp:\n          readOnlyRootFilesystem: true\n          privileged: true\n    neutron_bgp_dragent:\n      pod:\n        runAsUser: 42424\n      container:\n        neutron_bgp_dragent:\n          readOnlyRootFilesystem: true\n          privileged: true\n    neutron_l3_agent:\n      pod:\n        runAsUser: 42424\n      container:\n        neutron_l3_agent:\n          readOnlyRootFilesystem: true\n          privileged: true\n    neutron_lb_agent:\n      pod:\n        runAsUser: 42424\n      container:\n        neutron_lb_agent_kernel_modules:\n          capabilities:\n            add:\n              - SYS_MODULE\n              - SYS_CHROOT\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        neutron_lb_agent_init:\n          privileged: true\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        neutron_lb_agent:\n          readOnlyRootFilesystem: true\n          privileged: true\n    neutron_metadata_agent:\n      pod:\n        runAsUser: 42424\n      container:\n        neutron_metadata_agent_init:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n    neutron_ovn_metadata_agent:\n      pod:\n        runAsUser: 42424\n      container:\n        neutron_ovn_metadata_agent_init:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n    ovn_vpn_agent:\n      pod:\n        runAsUser: 42424\n      container:\n        ovn_vpn_agent_init:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n    neutron_ovs_agent:\n      pod:\n        runAsUser: 42424\n      container:\n        neutron_openvswitch_agent_kernel_modules:\n          capabilities:\n            add:\n  
            - SYS_MODULE\n              - SYS_CHROOT\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        netoffload:\n          privileged: true\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        neutron_ovs_agent_init:\n          privileged: true\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        neutron_ovs_agent:\n          readOnlyRootFilesystem: true\n          privileged: true\n    neutron_server:\n      pod:\n        runAsUser: 42424\n      container:\n        nginx:\n          runAsUser: 0\n          readOnlyRootFilesystem: false\n        neutron_server:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    neutron_rpc_server:\n      pod:\n        runAsUser: 42424\n      container:\n        neutron_rpc_server:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    neutron_sriov_agent:\n      pod:\n        runAsUser: 42424\n      container:\n        neutron_sriov_agent_init:\n          privileged: true\n          runAsUser: 0\n          readOnlyRootFilesystem: false\n        neutron_sriov_agent:\n          readOnlyRootFilesystem: true\n          privileged: true\n    neutron_ironic_agent:\n      pod:\n        runAsUser: 42424\n      container:\n        neutron_ironic_agent_init:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        neutron_ironic_agent:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    neutron_netns_cleanup_cron:\n      pod:\n        runAsUser: 42424\n      container:\n        neutron_netns_cleanup_cron:\n          readOnlyRootFilesystem: true\n          privileged: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    neutron:\n      enabled: false\n      tolerations:\n      - key: 
node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  mounts:\n    neutron_server:\n      init_container: null\n      neutron_server:\n        volumeMounts:\n        volumes:\n    neutron_rpc_server:\n      init_container: null\n      neutron_rpc_server:\n        volumeMounts:\n        volumes:\n    neutron_dhcp_agent:\n      init_container: null\n      neutron_dhcp_agent:\n        volumeMounts:\n        volumes:\n    neutron_l3_agent:\n      init_container: null\n      neutron_l3_agent:\n        volumeMounts:\n        volumes:\n    neutron_lb_agent:\n      init_container: null\n      neutron_lb_agent:\n        volumeMounts:\n        volumes:\n    neutron_metadata_agent:\n      init_container: null\n      neutron_metadata_agent:\n        volumeMounts:\n        volumes:\n    neutron_ovn_db_sync:\n      init_container: null\n      neutron_ovn_db_sync:\n        volumeMounts:\n        volumes:\n    neutron_ovn_metadata_agent:\n      init_container: null\n      neutron_ovn_metadata_agent:\n        volumeMounts:\n        volumes:\n    ovn_vpn_agent:\n      init_container: null\n      ovn_vpn_agent:\n        volumeMounts:\n        volumes:\n    neutron_ovs_agent:\n      init_container: null\n      neutron_ovs_agent:\n        volumeMounts:\n        volumes:\n    neutron_sriov_agent:\n      init_container: null\n      neutron_sriov_agent:\n        volumeMounts:\n        volumes:\n    neutron_l2gw_agent:\n      init_container: null\n      neutron_l2gw_agent:\n        volumeMounts:\n        volumes:\n    bagpipe_bgp:\n      init_container: null\n      bagpipe_bgp:\n        volumeMounts:\n        volumes:\n    bgp_dragent:\n      init_container: null\n      bgp_dragent:\n        volumeMounts:\n        volumes:\n    neutron_ironic_agent:\n      init_container: null\n      neutron_ironic_agent:\n        volumeMounts:\n        volumes:\n    
neutron_netns_cleanup_cron:\n      init_container: null\n      neutron_netns_cleanup_cron:\n        volumeMounts:\n        volumes:\n    neutron_tests:\n      init_container: null\n      neutron_tests:\n        volumeMounts:\n        volumes:\n    neutron_bootstrap:\n      init_container: null\n      neutron_bootstrap:\n        volumeMounts:\n        volumes:\n    neutron_db_sync:\n      neutron_db_sync:\n        volumeMounts:\n          - name: db-sync-conf\n            mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini\n            subPath: ml2_conf.ini\n            readOnly: true\n        volumes:\n  # -- This allows users to add Kubernetes Projected Volumes to be mounted at /etc/neutron/neutron.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/daemonset/cronjob\n  ## https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    neutron_server: []\n    neutron_rpc_server: []\n    neutron_dhcp_agent: []\n    neutron_l3_agent: []\n    neutron_lb_agent: []\n    neutron_metadata_agent: []\n    neutron_ovn_db_sync: []\n    neutron_ovn_metadata_agent: []\n    ovn_vpn_agent: []\n    neutron_ovs_agent: []\n    neutron_sriov_agent: []\n    neutron_l2gw_agent: []\n    bagpipe_bgp: []\n    bgp_dragent: []\n    neutron_ironic_agent: []\n    neutron_netns_cleanup_cron: []\n    neutron_tests: []\n    neutron_bootstrap: []\n    neutron_db_sync: []\n  replicas:\n    server: 1\n    rpc_server: 1\n    ironic_agent: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        dhcp_agent:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n        l3_agent:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n        lb_agent:\n 
         enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n        metadata_agent:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n        ovn_metadata_agent:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n        ovn_vpn_agent:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n        ovs_agent:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n        sriov_agent:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n        netns_cleanup_cron:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n    disruption_budget:\n      server:\n        min_available: 0\n    termination_grace_period:\n      server:\n        timeout: 30\n      rpc_server:\n        timeout: 30\n      ironic_agent:\n        timeout: 30\n  resources:\n    enabled: false\n    agent:\n      dhcp:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      l3:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      lb:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      metadata:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ovn_metadata:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ovn_vpn:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ovs:\n      
  requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      sriov:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      l2gw:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      bagpipe_bgp:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      bgp_dragent:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n    server:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    ironic_agent:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    netns_cleanup_cron:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: 
\"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ovn_db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nconf:\n  rally_tests:\n    run_tempest: false\n    clean_up: |\n      # NOTE: We will make the best effort to clean up rally generated networks and routers,\n      # but should not block further automated deployment.\n      set +e\n      PATTERN=\"^[sc]_rally_\"\n\n      ROUTERS=$(openstack router list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\\r')\n      NETWORKS=$(openstack network list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\\r')\n\n      for ROUTER in $ROUTERS\n      do\n        openstack router unset --external-gateway $ROUTER\n        openstack router set --disable --no-ha $ROUTER\n\n        SUBNS=$(openstack router show $ROUTER -c interfaces_info --format=value | python -m json.tool | grep -oP '(?<=\"subnet_id\": \")[a-f0-9\\-]{36}(?=\")' | sort | uniq)\n        for SUBN in $SUBNS\n        do\n          openstack router remove subnet $ROUTER $SUBN\n        done\n\n     
   for PORT in $(openstack port list --router $ROUTER --format=value -c ID | tr -d '\\r')\n        do\n          openstack router remove port $ROUTER $PORT\n        done\n\n        openstack router delete $ROUTER\n      done\n\n      for NETWORK in $NETWORKS\n      do\n        for PORT in $(openstack port list --network $NETWORK --format=value -c ID | tr -d '\\r')\n        do\n          openstack port delete $PORT\n        done\n        openstack network delete $NETWORK\n      done\n      set -e\n    tests:\n      NeutronNetworks.create_and_delete_networks:\n        - args:\n            network_create_args: {}\n          context:\n            quotas:\n              neutron:\n                network: -1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronNetworks.create_and_delete_ports:\n        - args:\n            network_create_args: {}\n            port_create_args: {}\n            ports_per_network: 10\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                port: -1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronNetworks.create_and_delete_routers:\n        - args:\n            network_create_args: {}\n            router_create_args: {}\n            subnet_cidr_start: 1.1.0.0/30\n            subnet_create_args: {}\n            subnets_per_network: 2\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                router: -1\n                subnet: -1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronNetworks.create_and_delete_subnets:\n     
   - args:\n            network_create_args: {}\n            subnet_cidr_start: 1.1.0.0/30\n            subnet_create_args: {}\n            subnets_per_network: 2\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                subnet: -1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronNetworks.create_and_list_routers:\n        - args:\n            network_create_args: {}\n            router_create_args: {}\n            subnet_cidr_start: 1.1.0.0/30\n            subnet_create_args: {}\n            subnets_per_network: 2\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                router: -1\n                subnet: -1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronNetworks.create_and_list_subnets:\n        - args:\n            network_create_args: {}\n            subnet_cidr_start: 1.1.0.0/30\n            subnet_create_args: {}\n            subnets_per_network: 2\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                subnet: -1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronNetworks.create_and_show_network:\n        - args:\n            network_create_args: {}\n          context:\n            quotas:\n              neutron:\n                network: -1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronNetworks.create_and_update_networks:\n  
      - args:\n            network_create_args: {}\n            network_update_args:\n              admin_state_up: false\n          context:\n            quotas:\n              neutron:\n                network: -1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronNetworks.create_and_update_ports:\n        - args:\n            network_create_args: {}\n            port_create_args: {}\n            port_update_args:\n              admin_state_up: false\n              device_id: dummy_id\n              device_owner: dummy_owner\n            ports_per_network: 5\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                port: -1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronNetworks.create_and_update_routers:\n        - args:\n            network_create_args: {}\n            router_create_args: {}\n            router_update_args:\n              admin_state_up: false\n            subnet_cidr_start: 1.1.0.0/30\n            subnet_create_args: {}\n            subnets_per_network: 2\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                router: -1\n                subnet: -1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronNetworks.create_and_update_subnets:\n        - args:\n            network_create_args: {}\n            subnet_cidr_start: 1.4.0.0/16\n            subnet_create_args: {}\n            subnet_update_args:\n              enable_dhcp: false\n            subnets_per_network: 2\n          context:\n            network: {}\n       
     quotas:\n              neutron:\n                network: -1\n                subnet: -1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronNetworks.list_agents:\n        - args:\n            agent_args: {}\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronSecurityGroup.create_and_list_security_groups:\n        - args:\n            security_group_create_args: {}\n          context:\n            quotas:\n              neutron:\n                security_group: -1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronSecurityGroup.create_and_update_security_groups:\n        - args:\n            security_group_create_args: {}\n            security_group_update_args: {}\n          context:\n            quotas:\n              neutron:\n                security_group: -1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n  paste:\n    composite:neutron:\n      use: egg:Paste#urlmap\n      /: neutronversions_composite\n      /v2.0: neutronapi_v2_0\n    composite:neutronapi_v2_0:\n      use: call:neutron.auth:pipeline_factory\n      noauth: cors http_proxy_to_wsgi request_id catch_errors extensions neutronapiapp_v2_0\n      keystone: cors http_proxy_to_wsgi request_id catch_errors authtoken audit keystonecontext extensions neutronapiapp_v2_0\n    composite:neutronversions_composite:\n      use: call:neutron.auth:pipeline_factory\n      noauth: cors http_proxy_to_wsgi neutronversions\n      keystone: cors http_proxy_to_wsgi neutronversions\n    filter:request_id:\n      paste.filter_factory: 
oslo_middleware:RequestId.factory\n    filter:catch_errors:\n      paste.filter_factory: oslo_middleware:CatchErrors.factory\n    filter:cors:\n      paste.filter_factory: oslo_middleware.cors:filter_factory\n      oslo_config_project: neutron\n    filter:http_proxy_to_wsgi:\n      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory\n    filter:keystonecontext:\n      paste.filter_factory: neutron.auth:NeutronKeystoneContext.factory\n    filter:authtoken:\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n    filter:audit:\n      paste.filter_factory: keystonemiddleware.audit:filter_factory\n      audit_map_file: /etc/neutron/api_audit_map.conf\n    filter:extensions:\n      paste.filter_factory: neutron.api.extensions:plugin_aware_extension_middleware_factory\n    app:neutronversions:\n      paste.app_factory: neutron.pecan_wsgi.app:versions_factory\n    app:neutronapiapp_v2_0:\n      paste.app_factory: neutron.api.v2.router:APIRouter.factory\n    filter:osprofiler:\n      paste.filter_factory: osprofiler.web:WsgiMiddleware.factory\n  neutron_api_uwsgi:\n    uwsgi:\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      procname-prefix-spaced: \"neutron-api:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      # start-time provides unix time at instance startup, used by ML2/OVN\n      # for OVN hash ring registers. 
See:\n      # https://docs.openstack.org/neutron/latest/admin/config-wsgi.html\n      start-time: \"%t\"\n      thunder-lock: true\n      worker-reload-mercy: 80\n      wsgi-file: /var/lib/openstack/bin/neutron-api\n      stats: 0.0.0.0:1717\n      stats-http: true\n  policy: {}\n  api_audit_map:\n    DEFAULT:\n      target_endpoint_type: None\n    custom_actions:\n      add_router_interface: update/add\n      remove_router_interface: update/remove\n    path_keywords:\n      floatingips: ip\n      healthmonitors: healthmonitor\n      health_monitors: health_monitor\n      lb: None\n      members: member\n      metering-labels: label\n      metering-label-rules: rule\n      networks: network\n      pools: pool\n      ports: port\n      routers: router\n      quotas: quota\n      security-groups: security-group\n      security-group-rules: rule\n      subnets: subnet\n      vips: vip\n    service_endpoints:\n      network: service/network\n  neutron_sudoers: |\n    # This sudoers file supports rootwrap for both Kolla and LOCI Images.\n    Defaults !requiretty\n    Defaults secure_path=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin\"\n    neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *\n    neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf, /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf\n  rootwrap: |\n    # Configuration for neutron-rootwrap\n    # This file should be owned by (and only-writeable by) the root user\n\n    [DEFAULT]\n    # List of directories to load filter definitions from (separated by ',').\n    # These directories MUST all be only writeable by root !\n    filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap,/var/lib/openstack/etc/neutron/rootwrap.d\n\n    # List 
of directories to search executables in, in case filters do not\n    # explicitely specify a full path (separated by ',')\n    # If not specified, defaults to system PATH environment variable.\n    # These directories MUST all be only writeable by root !\n    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin\n\n    # Enable logging to syslog\n    # Default value is False\n    use_syslog=False\n\n    # Which syslog facility to use.\n    # Valid values include auth, authpriv, syslog, local0, local1...\n    # Default value is 'syslog'\n    syslog_log_facility=syslog\n\n    # Which messages to log.\n    # INFO means log all usage\n    # ERROR means only log unsuccessful attempts\n    syslog_log_level=ERROR\n  rootwrap_filters:\n    debug:\n      pods:\n        - dhcp_agent\n        - l3_agent\n        - lb_agent\n        - metadata_agent\n        - ovn_metadata_agent\n        - ovn_vpn_agent\n        - ovs_agent\n        - sriov_agent\n      content: |\n        # neutron-rootwrap command filters for nodes on which neutron is\n        # expected to control network\n        #\n        # This file should be owned by (and only-writeable by) the root user\n\n        # format seems to be\n        # cmd-name: filter-name, raw-command, user, args\n\n        [Filters]\n\n        # This is needed because we should ping\n        # from inside a namespace which requires root\n        # _alt variants allow to match -c and -w in any order\n        #   (used by NeutronDebugAgent.ping_all)\n        ping: RegExpFilter, ping, root, ping, -w, \\d+, -c, \\d+, [0-9\\.]+\n        ping_alt: RegExpFilter, ping, root, ping, -c, \\d+, -w, \\d+, [0-9\\.]+\n        ping6: RegExpFilter, ping6, root, ping6, -w, \\d+, -c, \\d+, [0-9A-Fa-f:]+\n        ping6_alt: RegExpFilter, ping6, root, ping6, -c, \\d+, -w, \\d+, [0-9A-Fa-f:]+\n    dibbler:\n      pods:\n        - dhcp_agent\n        - l3_agent\n        - lb_agent\n        - 
metadata_agent\n        - ovn_metadata_agent\n        - ovn_vpn_agent\n        - ovs_agent\n        - sriov_agent\n      content: |\n        # neutron-rootwrap command filters for nodes on which neutron is\n        # expected to control network\n        #\n        # This file should be owned by (and only-writeable by) the root user\n\n        # format seems to be\n        # cmd-name: filter-name, raw-command, user, args\n\n        [Filters]\n\n        # Filters for the dibbler-based reference implementation of the pluggable\n        # Prefix Delegation driver. Other implementations using an alternative agent\n        # should include a similar filter in this folder.\n\n        # prefix_delegation_agent\n        dibbler-client: CommandFilter, dibbler-client, root\n    ipset_firewall:\n      pods:\n        - dhcp_agent\n        - l3_agent\n        - lb_agent\n        - metadata_agent\n        - ovn_metadata_agent\n        - ovn_vpn_agent\n        - ovs_agent\n        - sriov_agent\n      content: |\n        # neutron-rootwrap command filters for nodes on which neutron is\n        # expected to control network\n        #\n        # This file should be owned by (and only-writeable by) the root user\n\n        # format seems to be\n        # cmd-name: filter-name, raw-command, user, args\n\n        [Filters]\n        # neutron/agent/linux/iptables_firewall.py\n        #   \"ipset\", \"-A\", ...\n        ipset: CommandFilter, ipset, root\n    l3:\n      pods:\n        - dhcp_agent\n        - l3_agent\n        - lb_agent\n        - metadata_agent\n        - ovn_metadata_agent\n        - ovn_vpn_agent\n        - ovs_agent\n        - sriov_agent\n      content: |\n        # neutron-rootwrap command filters for nodes on which neutron is\n        # expected to control network\n        #\n        # This file should be owned by (and only-writeable by) the root user\n\n        # format seems to be\n        # cmd-name: filter-name, raw-command, user, args\n\n        [Filters]\n\n 
       # arping\n        arping: CommandFilter, arping, root\n\n        # l3_agent\n        sysctl: CommandFilter, sysctl, root\n        route: CommandFilter, route, root\n        radvd: CommandFilter, radvd, root\n\n        # haproxy\n        haproxy: RegExpFilter, haproxy, root, haproxy, -f, .*\n        kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP\n\n        # metadata proxy\n        metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root\n        # RHEL invocation of the metadata proxy will report /usr/bin/python\n        kill_metadata: KillFilter, root, python, -15, -9\n        kill_metadata2: KillFilter, root, python2, -15, -9\n        kill_metadata7: KillFilter, root, python2.7, -15, -9\n        kill_metadata3: KillFilter, root, python3, -15, -9\n        kill_metadata35: KillFilter, root, python3.5, -15, -9\n        kill_metadata36: KillFilter, root, python3.6, -15, -9\n        kill_metadata37: KillFilter, root, python3.7, -15, -9\n        kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -15, -9, -HUP\n        kill_radvd: KillFilter, root, /sbin/radvd, -15, -9, -HUP\n\n        # ip_lib\n        ip: IpFilter, ip, root\n        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*\n        ip_exec: IpNetnsExecFilter, ip, root\n\n        # l3_tc_lib\n        l3_tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+\n        l3_tc_add_qdisc_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress\n        l3_tc_add_qdisc_egress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, root, handle, 1:, htb\n        l3_tc_show_filters: RegExpFilter, tc, root, tc, -p, -s, -d, filter, show, dev, .+, parent, .+, prio, 1\n        l3_tc_delete_filters: RegExpFilter, tc, root, tc, filter, del, dev, .+, parent, .+, prio, 1, handle, .+, u32\n        l3_tc_add_filter_ingress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, dst, .+, police, rate, .+, burst, 
.+, drop, flowid, :1\n        l3_tc_add_filter_egress:  RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, src, .+, police, rate, .+, burst, .+, drop, flowid, :1\n\n        # For ip monitor\n        kill_ip_monitor: KillFilter, root, ip, -9\n\n        # ovs_lib (if OVSInterfaceDriver is used)\n        ovs-vsctl: CommandFilter, ovs-vsctl, root\n\n        # iptables_manager\n        iptables-save: CommandFilter, iptables-save, root\n        iptables-restore: CommandFilter, iptables-restore, root\n        ip6tables-save: CommandFilter, ip6tables-save, root\n        ip6tables-restore: CommandFilter, ip6tables-restore, root\n\n        # Keepalived\n        keepalived: CommandFilter, keepalived, root\n        kill_keepalived: KillFilter, root, keepalived, -HUP, -15, -9\n\n        # l3 agent to delete floatingip's conntrack state\n        conntrack: CommandFilter, conntrack, root\n\n        # keepalived state change monitor\n        keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root\n        # The following filters are used to kill the keepalived state change monitor.\n        # Since the monitor runs as a Python script, the system reports that the\n        # command of the process to be killed is python.\n        # TODO(mlavalle) These kill filters will be updated once we come up with a\n        # mechanism to kill using the name of the script being executed by Python\n        kill_keepalived_monitor_py: KillFilter, root, python, -15\n        kill_keepalived_monitor_py27: KillFilter, root, python2.7, -15\n        kill_keepalived_monitor_py3: KillFilter, root, python3, -15\n        kill_keepalived_monitor_py35: KillFilter, root, python3.5, -15\n        kill_keepalived_monitor_py36: KillFilter, root, python3.6, -15\n        kill_keepalived_monitor_py37: KillFilter, root, python3.7, -15\n    netns_cleanup:\n      pods:\n        - dhcp_agent\n        - l3_agent\n        - lb_agent\n        - 
metadata_agent\n        - ovn_metadata_agent\n        - ovn_vpn_agent\n        - ovs_agent\n        - sriov_agent\n        - netns_cleanup_cron\n      content: |\n        # neutron-rootwrap command filters for nodes on which neutron is\n        # expected to control network\n        #\n        # This file should be owned by (and only-writeable by) the root user\n\n        # format seems to be\n        # cmd-name: filter-name, raw-command, user, args\n\n        [Filters]\n\n        # netns-cleanup\n        netstat: CommandFilter, netstat, root\n    dhcp:\n      pods:\n        - dhcp_agent\n        - l3_agent\n        - lb_agent\n        - metadata_agent\n        - ovn_metadata_agent\n        - ovn_vpn_agent\n        - ovs_agent\n        - sriov_agent\n        - netns_cleanup_cron\n      content: |\n        # neutron-rootwrap command filters for nodes on which neutron is\n        # expected to control network\n        #\n        # This file should be owned by (and only-writeable by) the root user\n\n        # format seems to be\n        # cmd-name: filter-name, raw-command, user, args\n\n        [Filters]\n\n        # dhcp-agent\n        dnsmasq: CommandFilter, dnsmasq, root\n        # dhcp-agent uses kill as well, that's handled by the generic KillFilter\n        # it looks like these are the only signals needed, per\n        # neutron/agent/linux/dhcp.py\n        kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP, -15\n        kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP, -15\n\n        ovs-vsctl: CommandFilter, ovs-vsctl, root\n        ivs-ctl: CommandFilter, ivs-ctl, root\n        mm-ctl: CommandFilter, mm-ctl, root\n        dhcp_release: CommandFilter, dhcp_release, root\n        dhcp_release6: CommandFilter, dhcp_release6, root\n\n        # metadata proxy\n        metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root\n        # RHEL invocation of the metadata proxy will report /usr/bin/python\n        kill_metadata: KillFilter, 
root, python, -9\n        kill_metadata2: KillFilter, root, python2, -9\n        kill_metadata7: KillFilter, root, python2.7, -9\n        kill_metadata3: KillFilter, root, python3, -9\n        kill_metadata35: KillFilter, root, python3.5, -9\n        kill_metadata36: KillFilter, root, python3.6, -9\n        kill_metadata37: KillFilter, root, python3.7, -9\n\n        # ip_lib\n        ip: IpFilter, ip, root\n        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*\n        ip_exec: IpNetnsExecFilter, ip, root\n    ebtables:\n      pods:\n        - dhcp_agent\n        - l3_agent\n        - lb_agent\n        - metadata_agent\n        - ovn_metadata_agent\n        - ovn_vpn_agent\n        - ovs_agent\n        - sriov_agent\n      content: |\n        # neutron-rootwrap command filters for nodes on which neutron is\n        # expected to control network\n        #\n        # This file should be owned by (and only-writeable by) the root user\n\n        # format seems to be\n        # cmd-name: filter-name, raw-command, user, args\n\n        [Filters]\n\n        ebtables: CommandFilter, ebtables, root\n    iptables_firewall:\n      pods:\n        - dhcp_agent\n        - l3_agent\n        - lb_agent\n        - metadata_agent\n        - ovn_metadata_agent\n        - ovn_vpn_agent\n        - ovs_agent\n        - sriov_agent\n      content: |\n        # neutron-rootwrap command filters for nodes on which neutron is\n        # expected to control network\n        #\n        # This file should be owned by (and only-writeable by) the root user\n\n        # format seems to be\n        # cmd-name: filter-name, raw-command, user, args\n\n        [Filters]\n\n        # neutron/agent/linux/iptables_firewall.py\n        #   \"iptables-save\", ...\n        iptables-save: CommandFilter, iptables-save, root\n        iptables-restore: CommandFilter, iptables-restore, root\n        ip6tables-save: CommandFilter, ip6tables-save, root\n        
ip6tables-restore: CommandFilter, ip6tables-restore, root\n\n        # neutron/agent/linux/iptables_firewall.py\n        #   \"iptables\", \"-A\", ...\n        iptables: CommandFilter, iptables, root\n        ip6tables: CommandFilter, ip6tables, root\n\n        # neutron/agent/linux/iptables_firewall.py\n        sysctl: CommandFilter, sysctl, root\n\n        # neutron/agent/linux/ip_conntrack.py\n        conntrack: CommandFilter, conntrack, root\n    linuxbridge_plugin:\n      pods:\n        - dhcp_agent\n        - l3_agent\n        - lb_agent\n        - metadata_agent\n        - ovn_metadata_agent\n        - ovn_vpn_agent\n        - ovs_agent\n        - sriov_agent\n      content: |\n        # neutron-rootwrap command filters for nodes on which neutron is\n        # expected to control network\n        #\n        # This file should be owned by (and only-writeable by) the root user\n\n        # format seems to be\n        # cmd-name: filter-name, raw-command, user, args\n\n        [Filters]\n\n        # linuxbridge-agent\n        # unclear whether both variants are necessary, but I'm transliterating\n        # from the old mechanism\n        brctl: CommandFilter, brctl, root\n        bridge: CommandFilter, bridge, root\n\n        # ip_lib\n        ip: IpFilter, ip, root\n        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*\n        ip_exec: IpNetnsExecFilter, ip, root\n\n        # tc commands needed for QoS support\n        tc_replace_tbf: RegExpFilter, tc, root, tc, qdisc, replace, dev, .+, root, tbf, rate, .+, latency, .+, burst, .+\n        tc_add_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress, handle, .+\n        tc_delete: RegExpFilter, tc, root, tc, qdisc, del, dev, .+, .+\n        tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+\n        tc_show_filters: RegExpFilter, tc, root, tc, filter, show, dev, .+, parent, .+\n        tc_add_filter: RegExpFilter, tc, root, tc, filter, add, 
dev, .+, parent, .+, protocol, all, prio, .+, basic, police, rate, .+, burst, .+, mtu, .+, drop\n    openvswitch_plugin:\n      pods:\n        - dhcp_agent\n        - l3_agent\n        - lb_agent\n        - metadata_agent\n        - ovn_metadata_agent\n        - ovn_vpn_agent\n        - ovs_agent\n        - sriov_agent\n      content: |\n        # neutron-rootwrap command filters for nodes on which neutron is\n        # expected to control network\n        #\n        # This file should be owned by (and only-writeable by) the root user\n\n        # format seems to be\n        # cmd-name: filter-name, raw-command, user, args\n\n        [Filters]\n\n        # openvswitch-agent\n        # unclear whether both variants are necessary, but I'm transliterating\n        # from the old mechanism\n        ovs-vsctl: CommandFilter, ovs-vsctl, root\n        # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl\n        ovs-ofctl: CommandFilter, ovs-ofctl, root\n        ovs-appctl: CommandFilter, ovs-appctl, root\n        kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9\n        ovsdb-client: CommandFilter, ovsdb-client, root\n        xe: CommandFilter, xe, root\n\n        # ip_lib\n        ip: IpFilter, ip, root\n        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*\n        ip_exec: IpNetnsExecFilter, ip, root\n\n        # needed for FDB extension\n        bridge: CommandFilter, bridge, root\n    privsep:\n      pods:\n        - dhcp_agent\n        - l3_agent\n        - lb_agent\n        - metadata_agent\n        - ovn_metadata_agent\n        - ovn_vpn_agent\n        - ovs_agent\n        - sriov_agent\n        - netns_cleanup_cron\n      content: |\n        # Command filters to allow privsep daemon to be started via rootwrap.\n        #\n        # This file should be owned by (and only-writeable by) the root user\n\n        [Filters]\n\n        # By installing the following, the local admin is asserting that:\n   
     #\n        # 1. The python module load path used by privsep-helper\n        #    command as root (as started by sudo/rootwrap) is trusted.\n        # 2. Any oslo.config files matching the --config-file\n        #    arguments below are trusted.\n        # 3. Users allowed to run sudo/rootwrap with this configuration(*) are\n        #    also allowed to invoke python \"entrypoint\" functions from\n        #    --privsep_context with the additional (possibly root) privileges\n        #    configured for that context.\n        #\n        # (*) ie: the user is allowed by /etc/sudoers to run rootwrap as root\n        #\n        # In particular, the oslo.config and python module path must not\n        # be writeable by the unprivileged user.\n\n        # oslo.privsep default neutron context\n        privsep: PathFilter, privsep-helper, root,\n         --config-file, /etc,\n         --privsep_context, neutron.privileged.default,\n         --privsep_sock_path, /\n\n        # NOTE: A second `--config-file` arg can also be added above. 
Since\n        # many neutron components are installed like that (eg: by devstack).\n        # Adjust to suit local requirements.\n    linux_vxlan:\n      pods:\n        - bagpipe_bgp\n      content: |\n        # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is\n        # expected to control VXLAN Linux Bridge dataplane\n        #\n        # This file should be owned by (and only-writeable by) the root user\n\n        # format seems to be\n        # cmd-name: filter-name, raw-command, user, args\n\n        [Filters]\n\n        #\n        modprobe: CommandFilter, modprobe, root\n\n        #\n        brctl: CommandFilter, brctl, root\n        bridge: CommandFilter, bridge, root\n\n        # ip_lib\n        ip: IpFilter, ip, root\n        ip_exec: IpNetnsExecFilter, ip, root\n\n        # shell (for piped commands)\n        sh: CommandFilter, sh, root\n    mpls_ovs_dataplane:\n      pods:\n        - bagpipe_bgp\n      content: |\n        # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is\n        # expected to control MPLS OpenVSwitch dataplane\n        #\n        # This file should be owned by (and only-writeable by) the root user\n\n        # format seems to be\n        # cmd-name: filter-name, raw-command, user, args\n\n        [Filters]\n\n        # openvswitch\n        ovs-vsctl: CommandFilter, ovs-vsctl, root\n        ovs-ofctl: CommandFilter, ovs-ofctl, root\n\n        # ip_lib\n        ip: IpFilter, ip, root\n        ip_exec: IpNetnsExecFilter, ip, root\n\n        # shell (for piped commands)\n        sh: CommandFilter, sh, root\n  neutron:\n    DEFAULT:\n      metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy\n      log_config_append: /etc/neutron/logging.conf\n      # NOTE(portdirect): the bind port should not be defined, and is manipulated\n      # via the endpoints section.\n      bind_port: null\n      default_availability_zones: nova\n      api_workers: 1\n      rpc_workers: 4\n      
allow_overlapping_ips: True\n      state_path: /var/lib/neutron\n      # core_plugin can be: ml2, calico\n      core_plugin: ml2\n      # service_plugin can be: router, empty for calico,\n      # networking_ovn.l3.l3_ovn.OVNL3RouterPlugin for OVN\n      service_plugins: router\n      allow_automatic_l3agent_failover: True\n      l3_ha: True\n      max_l3_agents_per_router: 2\n      l3_ha_network_type: vxlan\n      network_auto_schedule: True\n      router_auto_schedule: True\n      # (NOTE)portdirect: if unset this is populated dynamically from the value in\n      # 'network.backend' to sane defaults.\n      interface_driver: null\n    oslo_concurrency:\n      lock_path: /var/lock\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    agent:\n      root_helper: sudo /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf\n      root_helper_daemon: sudo /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf\n    oslo_messaging_notifications:\n      driver: messagingv2\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: true\n    oslo_middleware:\n      enable_proxy_headers_parsing: true\n    oslo_policy:\n      policy_file: /etc/neutron/policy.yaml\n    ovn:\n      ovn_metadata_enabled: true\n    nova:\n      auth_type: password\n      auth_version: v3\n      endpoint_type: internal\n    placement:\n      auth_type: password\n      auth_version: v3\n      endpoint_type: internal\n    designate:\n      auth_type: password\n      auth_version: v3\n      endpoint_type: internal\n      allow_reverse_dns_lookup: true\n    ironic:\n      auth_type: password\n      auth_version: v3\n      endpoint_type: internal\n    keystone_authtoken:\n     
 service_token_roles: service\n      service_token_roles_required: true\n      memcache_security_strategy: ENCRYPT\n      auth_type: password\n      auth_version: v3\n      service_type: network\n    octavia:\n      request_poll_timeout: 3000\n  logging:\n    loggers:\n      keys:\n        - root\n        - neutron\n        - neutron_taas\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_neutron:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: neutron\n    logger_neutron_taas:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: neutron_taas\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n  plugins:\n    ml2_conf:\n      ml2:\n        extension_drivers: port_security\n        # (NOTE)portdirect: if unset this is populated dyanmicly from the value\n        # in 'network.backend' to sane defaults.\n        mechanism_drivers: null\n        
type_drivers: flat,vlan,vxlan,local\n        tenant_network_types: vxlan\n      ml2_type_vxlan:\n        vni_ranges: 1:1000\n        vxlan_group: 239.1.1.1\n      ml2_type_flat:\n        flat_networks: \"*\"\n      # If you want to use the external network as a tagged provider network,\n      # a range should be specified including the intended VLAN target\n      # using ml2_type_vlan.network_vlan_ranges:\n      # ml2_type_vlan:\n      #   network_vlan_ranges: \"external:1100:1110\"\n      ml2_type_geneve:\n        vni_ranges: 1:65536\n        max_header_size: 38\n      agent:\n        extensions: \"\"\n      ovn: {}\n    ml2_conf_sriov: null\n    taas:\n      taas:\n        enabled: False\n    openvswitch_agent:\n      agent:\n        tunnel_types: vxlan\n        l2_population: True\n        arp_responder: True\n      ovs:\n        bridge_mappings: \"external:br-ex\"\n      securitygroup:\n        firewall_driver: neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver\n    linuxbridge_agent:\n      linux_bridge:\n        # To define Flat and VLAN connections, in LB we can assign\n        # specific interface to the flat/vlan network name using:\n        # physical_interface_mappings: \"external:eth3\"\n        # Or we can set the mapping between the network and bridge:\n        bridge_mappings: \"external:br-ex\"\n        # The two above options are exclusive, do not use both of them at once\n      securitygroup:\n        firewall_driver: iptables\n      vxlan:\n        l2_population: True\n        arp_responder: True\n    macvtap_agent: null\n    sriov_agent:\n      securitygroup:\n        firewall_driver: neutron.agent.firewall.NoopFirewallDriver\n      sriov_nic:\n        physical_device_mappings: physnet2:enp3s0f1\n        # NOTE: do not use null here, use an empty string\n        exclude_devices: \"\"\n  dhcp_agent:\n    DEFAULT:\n      # (NOTE)portdirect: if unset this is populated dyanmicly from the value in\n      # 'network.backend' to sane 
defaults.\n      interface_driver: null\n      dnsmasq_config_file: /etc/neutron/dnsmasq.conf\n      force_metadata: True\n    # NOTE(mnaser): This has to be here in order for the DHCP agent to work with OVN.\n    ovs: {}\n  dnsmasq: |\n      #no-hosts\n      #port=5353\n      #cache-size=500\n      #no-negcache\n      #dns-forward-max=100\n      #resolve-file=\n      #strict-order\n      #bind-interface\n      #bind-dynamic\n      #domain=\n      #dhcp-range=10.10.10.10,10.10.10.100,24h\n      #dhcp-lease-max=150\n      #dhcp-host=11:22:33:44:55:66,ignore\n      #dhcp-option=3,10.10.10.1\n      #dhcp-option-force=26,1450\n\n  neutron_vpnaas: null\n  ovn_vpn_agent:\n    DEFAULT:\n      interface_driver: openvswitch\n    vpnagent:\n      vpn_device_driver: neutron_vpnaas.services.vpn.device_drivers.ovn_ipsec.OvnStrongSwanDriver\n    ovs:\n      ovsdb_connection: unix:/run/openvswitch/db.sock\n  l3_agent:\n    DEFAULT:\n      # (NOTE)portdirect: if unset this is populated dyanmicly from the value in\n      # 'network.backend' to sane defaults.\n      interface_driver: null\n      agent_mode: legacy\n  metering_agent: null\n  metadata_agent:\n    DEFAULT:\n      log_config_append: /etc/neutron/logging.conf\n      # we cannot change the proxy socket path as it is declared\n      # as a hostPath volume from agent daemonsets\n      metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy\n      metadata_proxy_shared_secret: \"password\"\n    cache:\n      enabled: true\n      backend: dogpile.cache.memcached\n  bagpipe_bgp: {}\n  ovn_metadata_agent:\n    DEFAULT:\n      # we cannot change the proxy socket path as it is declared\n      # as a hostPath volume from agent daemonsets\n      metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy\n      metadata_proxy_shared_secret: \"password\"\n      metadata_workers: 2\n    cache:\n      enabled: true\n      backend: dogpile.cache.memcached\n    ovs:\n      ovsdb_connection: 
unix:/run/openvswitch/db.sock\n  bgp_dragent: {}\n\n  rabbitmq:\n    # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones\n    policies:\n      - vhost: \"neutron\"\n        name: \"ha_ttl_neutron\"\n        definition:\n          # mirror messges to other nodes in rmq cluster\n          ha-mode: \"all\"\n          ha-sync-mode: \"automatic\"\n          # 70s\n          message-ttl: 70000\n        priority: 0\n        apply-to: all\n        pattern: '^(?!(amq\\.|reply_)).*'\n  ## NOTE: \"besteffort\" is meant for dev env with mixed compute type only.\n  ##       This helps prevent sriov init script from failing due to mis-matched NIC\n  ##       For prod env, target NIC should match and init script should fail otherwise.\n  ## sriov_init:\n  ##   - besteffort\n  sriov_init:\n    -\n  # auto_bridge_add is a table of \"bridge: interface\" pairs\n  # To automatically add a physical interfaces to a specific bridges,\n  # for example eth3 to bridge br-physnet1, if0 to br0 and iface_two\n  # to br1 do something like:\n  #\n  # auto_bridge_add:\n  #   br-physnet1: eth3\n  #   br0: if0\n  #   br1: iface_two\n  # br-ex will be added by default\n  auto_bridge_add:\n    br-ex: null\n\n  # Network off-loading configuration\n  netoffload:\n    enabled: false\n    asap2:\n      # - dev: enp97s0f0\n      #   vfs: 16\n\n  # configuration of OVS DPDK bridges and NICs\n  # this is a separate section and not part of the auto_bridge_add section\n  # because additional parameters are needed\n  ovs_dpdk:\n    enabled: false\n    # setting update_dpdk_bond_config to true will have default behavior,\n    # which may cause disruptions in ovs dpdk traffic in case of neutron\n    # ovs agent restart or when dpdk nic/bond configurations are changed.\n    # Setting this to false will configure dpdk in the first run and\n    # disable nic/bond config on event of restart or config update.\n    update_dpdk_bond_config: true\n    
driver: uio_pci_generic\n    # In case bonds are configured, the nics which are part of those bonds\n    # must NOT be provided here.\n    nics:\n      - name: dpdk0\n        # Optionally, instead of using pci_id you can use the name of\n        # the interface. If both are used, pci_id has presedence.\n        # iface: eth0\n        pci_id: '0000:05:00.0'\n        # Set VF Index in case some particular VF(s) need to be\n        # used with ovs-dpdk.\n        # vf_index: 0\n        bridge: br-phy\n        migrate_ip: true\n        n_rxq: 2\n        n_txq: 2\n        pmd_rxq_affinity: \"0:3,1:27\"\n        ofport_request: 1\n        # optional parameters for tuning the OVS DPDK config\n        # in alignment with the available hardware resources\n        # mtu: 2000\n        # n_rxq_size: 1024\n        # n_txq_size: 1024\n        # vhost-iommu-support: true\n    bridges:\n      - name: br-phy\n      # optional parameter, in case tunnel traffic needs to be transported over a vlan underlay\n      # - tunnel_underlay_vlan: 45\n    # Optional parameter for configuring bonding in OVS-DPDK\n    #   - name: br-phy-bond0\n    # bonds:\n    #   - name: dpdkbond0\n    #     bridge: br-phy-bond0\n    #     # The IP from the first nic in nics list shall be used\n    #     migrate_ip: true\n    #     mtu: 2000\n    #     # Please note that n_rxq is set for each NIC individually\n    #     # rather than denoting the total number of rx queues for\n    #     # the bond as a whole. So setting n_rxq = 2 below for ex.\n    #     # would be 4 rx queues in total for the bond.\n    #     # Same for n_txq\n    #     n_rxq: 2\n    #     n_txq: 2\n    #     ofport_request: 1\n    #     n_rxq_size: 1024\n    #     n_txq_size: 1024\n    #     vhost-iommu-support: true\n    #     ovs_options: \"bond_mode=active-backup\"\n    #     nics:\n    #       - name: dpdk_b0s0\n    #         # Optionally, instead of using pci_id you can use the name of\n    #         # the interface. 
If both are used, pci_id has presedence.\n    #         # iface: eth0\n    #         pci_id: '0000:06:00.0'\n    #         pmd_rxq_affinity: \"0:3,1:27\"\n    #         # Set VF Index in case some particular VF(s) need to be\n    #         # used with ovs-dpdk. In which case pci_id of PF must be\n    #         # provided above.\n    #         # vf_index: 0\n    #       - name: dpdk_b0s1\n    #         pci_id: '0000:07:00.0'\n    #         pmd_rxq_affinity: \"0:3,1:27\"\n    #         # Set VF Index in case some particular VF(s) need to be\n    #         # used with ovs-dpdk. In which case pci_id of PF must be\n    #         # provided above.\n    #         # vf_index: 0\n    #\n    # Set the log level for each target module (default level is always dbg)\n    # Supported log levels are: off, emer, err, warn, info, dbg\n    #\n    # modules:\n    #   - name: dpdk\n    #     log_level: info\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: neutron-keystone-admin\n    neutron: neutron-keystone-user\n    nova: neutron-keystone-nova\n    placement: neutron-keystone-placement\n    designate: neutron-keystone-designate\n    ironic: neutron-keystone-ironic\n    test: neutron-keystone-test\n  oslo_db:\n    admin: neutron-db-admin\n    neutron: neutron-db-user\n  oslo_messaging:\n    admin: neutron-rabbitmq-admin\n    neutron: neutron-rabbitmq-user\n  tls:\n    compute_metadata:\n      metadata:\n        internal: metadata-tls-metadata\n    network:\n      server:\n        public: neutron-tls-public\n        internal: neutron-tls-server\n  oci_image_registry:\n    neutron: neutron-oci-image-registry\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: 
localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      neutron:\n        username: neutron\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      neutron:\n        username: neutron\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /neutron\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n        secret:\n          tls:\n            internal: rabbitmq-tls-direct\n      neutron:\n        username: neutron\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /neutron\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  compute:\n    name: nova\n    hosts:\n      default: nova-api\n      public: nova\n    host_fqdn_override:\n      
default: null\n    path:\n      default: \"/v2.1/%(tenant_id)s\"\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 8774\n        public: 80\n      novncproxy:\n        default: 6080\n  compute_metadata:\n    name: nova\n    hosts:\n      default: nova-metadata\n      public: metadata\n    host_fqdn_override:\n      default: null\n    path:\n      default: /\n    scheme:\n      default: 'http'\n    port:\n      metadata:\n        default: 8775\n        public: 80\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      neutron:\n        role: admin,service\n        region_name: RegionOne\n        username: neutron\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      nova:\n        role: admin,service\n        region_name: RegionOne\n        project_name: service\n        username: neutron_nova\n        password: password\n        user_domain_name: service\n        project_domain_name: service\n      placement:\n        role: admin,service\n        region_name: RegionOne\n        project_name: service\n        username: neutron_placement\n        password: password\n        user_domain_name: service\n        project_domain_name: service\n      designate:\n        role: admin,service\n        region_name: RegionOne\n        project_name: service\n        username: neutron_designate\n        password: password\n        user_domain_name: service\n        project_domain_name: service\n      ironic:\n        role: admin,service\n        region_name: RegionOne\n        project_name: service\n        username: neutron_ironic\n        password: password\n        user_domain_name: service\n        project_domain_name: service\n      test:\n        role: admin\n        
region_name: RegionOne\n        username: neutron-test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  network:\n    name: neutron\n    hosts:\n      default: neutron-server\n      public: neutron\n    host_fqdn_override:\n      default: null\n      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n      service: 'http'\n    port:\n      api:\n        default: 9696\n        public: 80\n        service: 9696\n  load_balancer:\n    name: octavia\n    hosts:\n      default: octavia-api\n      public: octavia\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      api:\n        default: 9876\n        public: 80\n  fluentd:\n    namespace: osh-infra\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n  dns:\n    name: designate\n    hosts:\n      default: designate-api\n      public: designate\n    host_fqdn_override:\n      default: null\n    path:\n      default: /\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 9001\n        public: 80\n  baremetal:\n    name: ironic\n    hosts:\n      default: ironic-api\n      public: ironic\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    
scheme:\n      default: 'http'\n    port:\n      api:\n        default: 6385\n        public: 80\n  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress\n  # They are using to enable the Egress K8s network policy.\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns:\n        default: 53\n        protocol: UDP\n  ingress:\n    namespace: null\n    name: ingress\n    hosts:\n      default: ingress\n    port:\n      ingress:\n        default: 80\n\nnetwork_policy:\n  neutron:\n    # TODO(lamt): Need to tighten this ingress for security.\n    ingress:\n      - {}\n    egress:\n      - {}\n\nhealth_probe:\n  logging:\n    level: ERROR\n\njobs:\n  ovn_db_sync:\n    cron: \"*/5 * * * *\"\n    sync_mode: log\n    history:\n      success: 3\n      failed: 1\n\ntls:\n  identity: false\n  oslo_messaging: false\n  oslo_db: false\n\nmanifests:\n  certificates: false\n  cron_job_ovn_db_sync: false\n  configmap_bin: true\n  configmap_etc: true\n  daemonset_dhcp_agent: true\n  daemonset_l3_agent: true\n  daemonset_lb_agent: true\n  daemonset_metadata_agent: true\n  daemonset_ovs_agent: true\n  daemonset_sriov_agent: true\n  daemonset_l2gw_agent: false\n  daemonset_bagpipe_bgp: false\n  daemonset_bgp_dragent: false\n  daemonset_netns_cleanup_cron: true\n  daemonset_ovn_metadata_agent: false\n  daemonset_ovn_vpn_agent: false\n  deployment_ironic_agent: false\n  deployment_server: true\n  deployment_rpc_server: true\n  ingress_server: true\n  job_bootstrap: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_image_repo_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_rabbit_init: true\n  pdb_server: true\n  pod_rally_test: true\n  network_policy: false\n  secret_db: true\n  secret_ingress_tls: true\n  secret_keystone: true\n  
secret_ks_etc: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_ingress_server: true\n  service_server: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "nfs-provisioner/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v2.2.1\ndescription: OpenStack-Helm NFS\nname: nfs-provisioner\nversion: 2025.2.0\nhome: https://github.com/kubernetes-incubator/external-storage\nsources:\n  - https://github.com/kubernetes-incubator/external-storage\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "nfs-provisioner/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: nfs-bin\ndata:\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "nfs-provisioner/templates/deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"nfs-provisioner\" }}\n{{ tuple $envAll \"nfs\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - ''\n    resources:\n      - persistentvolumes\n    verbs:\n      - get\n      - list\n      - watch\n      - create\n      - delete\n  - apiGroups:\n      - ''\n    resources:\n      - persistentvolumeclaims\n    verbs:\n      - get\n      - list\n      - watch\n      - update\n  - apiGroups:\n      - storage.k8s.io\n    resources:\n      - storageclasses\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - ''\n    resources:\n      - events\n    verbs:\n      - list\n      - watch\n      - create\n      - update\n      - patch\n  - apiGroups:\n      - ''\n    resources:\n      - services\n    verbs:\n      - get\n  - apiGroups:\n      - ''\n    resources:\n      - endpoints\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n  - apiGroups:\n      - policy\n    resources:\n      - podsecuritypolicies\n    resourceNames:\n      - nfs-provisioner\n    verbs:\n      - use\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: 
{{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\nkind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: nfs-provisioner\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"nfs\" \"provisioner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.server }}\n  strategy:\n    type: Recreate\n  selector:\n    matchLabels:\n{{ tuple $envAll \"nfs\" \"provisioner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nfs\" \"provisioner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"nfs\" \"provisioner\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.nfs.node_selector_key }}: {{ .Values.labels.nfs.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"nfs\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: nfs-provisioner\n{{ tuple $envAll \"nfs_provisioner\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            capabilities:\n              add:\n                - DAC_READ_SEARCH\n                - SYS_RESOURCE\n          ports:\n            - name: nfs\n              containerPort: 2049\n            - name: nfs-udp\n              containerPort: 2049\n              protocol: UDP\n            - name: mountd\n              containerPort: 20048\n            - name: mountd-udp\n              containerPort: 20048\n              protocol: UDP\n            - name: rpcbind\n              containerPort: 111\n            - name: rpcbind-udp\n              containerPort: 111\n              protocol: UDP\n            - name: port-662\n              containerPort: 662\n            - name: port-662-udp\n              containerPort: 662\n              protocol: UDP\n            - name: port-875\n              containerPort: 875\n            - name: port-875-udp\n              containerPort: 875\n              protocol: UDP\n            - name: port-32803\n              containerPort: 32803\n            - name: port-32803-udp\n              containerPort: 32803\n              protocol: UDP\n          env:\n            - name: POD_IP\n              valueFrom:\n                
fieldRef:\n                  fieldPath: status.podIP\n            - name: SERVICE_NAME\n              value: {{ tuple \"nfs\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n            - name: POD_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n          args:\n            {{ if empty .Values.storageclass.provisioner -}}\n            - \"-provisioner=nfs/{{ .Release.Name }}\"\n            {{- else -}}\n            - \"-provisioner={{ .Values.storageclass.provisioner }}\"\n            {{- end }}\n            - \"-grace-period=10\"\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: export-volume\n              mountPath: /export\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: export-volume\n          {{- if eq .Values.storage.type \"persistentVolumeClaim\" }}\n          persistentVolumeClaim:\n            {{ if empty .Values.storage.persistentVolumeClaim.name -}}\n            claimName: {{ .Release.Name }}\n            {{- else -}}\n            claimName: {{ .Values.storage.persistentVolumeClaim.name }}\n            {{- end }}\n          {{- else if eq .Values.storage.type \"hostPath\" }}\n          hostPath:\n            path: {{ .Values.storage.hostPath.path }}\n          {{- end }}\n{{- end }}\n"
  },
  {
    "path": "nfs-provisioner/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "nfs-provisioner/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"nfs-provisioner\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "nfs-provisioner/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "nfs-provisioner/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n---\nkind: Service\napiVersion: v1\nmetadata:\n  name: {{ tuple \"nfs\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  labels:\n{{ tuple $envAll \"nfs\" \"provisioner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  ports:\n    - name: nfs\n      port: 2049\n    - name: nfs-udp\n      port: 2049\n      protocol: UDP\n    - name: mountd\n      port: 20048\n    - name: mountd-udp\n      port: 20048\n      protocol: UDP\n    - name: rpcbind\n      port: 111\n    - name: rpcbind-udp\n      port: 111\n      protocol: UDP\n    - name: port-662\n      port: 662\n    - name: port-662-udp\n      port: 662\n      protocol: UDP\n    - name: port-875\n      port: 875\n    - name: port-875-udp\n      port: 875\n      protocol: UDP\n    - name: port-32803\n      port: 32803\n    - name: port-32803-udp\n      port: 32803\n      protocol: UDP\n  selector:\n{{ tuple $envAll \"nfs\" \"provisioner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "nfs-provisioner/templates/storage_class.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.storage_class }}\n{{- $envAll := . }}\n---\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\n  {{ if empty .Values.storageclass.name -}}\n  name: {{ .Release.Name }}\n  {{- else -}}\n  name: {{ .Values.storageclass.name }}\n  {{- end }}\n{{ if empty .Values.storageclass.provisioner -}}\nprovisioner: nfs/{{ .Release.Name }}\n{{- else -}}\nprovisioner: {{ .Values.storageclass.provisioner }}\n{{- end }}\nparameters:\n  mountOptions: vers=4.1\n{{- end }}\n"
  },
  {
    "path": "nfs-provisioner/templates/volume_claim.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.volume_claim }}\n{{- if eq .Values.storage.type \"persistentVolumeClaim\" }}\n{{- $envAll := . }}\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  {{ if empty .Values.storage.persistentVolumeClaim.name -}}\n  name: {{ .Release.Name }}\n  {{- else -}}\n  name: {{ .Values.storage.persistentVolumeClaim.name }}\n  {{- end }}\nspec:\n  accessModes:\n    - {{ .Values.storage.persistentVolumeClaim.access_mode }}\n  resources:\n    requests:\n      storage: {{ .Values.storage.persistentVolumeClaim.size }}\n  storageClassName: {{ .Values.storage.persistentVolumeClaim.class_name }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "nfs-provisioner/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for NFS.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\npod:\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  replicas:\n    # only 1 replica currently supported\n    server: 1\n  resources:\n    enabled: false\n    server:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nimages:\n  tags:\n    nfs_provisioner: quay.io/kubernetes_incubator/nfs-provisioner:v2.3.0\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nstorage:\n  type: hostPath\n  hostPath:\n    path: /var/lib/openstack-helm/nfs\n  persistentVolumeClaim:\n    access_mode: ReadWriteOnce\n    class_name: general\n    # NOTE(portdirect): Unless explicity set the PV name will be populated to\n    # match \"{{ .Release.Name }}\".\n    name: null\n    size: 
10Gi\n\nlabels:\n  nfs:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nstorageclass:\n  # NOTE(portdirect): Unless explicity set the provisioner name will be generated\n  # with the format \"nfs/{{ .Release.Name }}\"\n  provisioner: null\n  # NOTE(portdirect): Unless explicity set the PV name will be populated to\n  # match \"{{ .Release.Name }}\".\n  name: null\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - nfs-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    nfs:\n      services: null\n\nsecrets:\n  oci_image_registry:\n    nfs-provisioner: nfs-provisioner-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      nfs-provisioner:\n        username: nfs-provisioner\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  nfs:\n    hosts:\n      default: nfs-provisioner\n    host_fqdn_override:\n      default: null\n    path: null\n    scheme: null\n    port:\n      nfs:\n        default: null\n\nmanifests:\n  configmap_bin: true\n  deployment: true\n  job_image_repo_sync: true\n  secret_registry: true\n  service: true\n  storage_class: true\n  volume_claim: true\n\n# -- Array of extra K8s 
manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "nova/.helmignore",
    "content": "values_overrides\n"
  },
  {
    "path": "nova/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Nova\nname: nova\nversion: 2025.2.0\nhome: https://docs.openstack.org/nova/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Nova/OpenStack_Project_Nova_vertical.png\nsources:\n  - https://opendev.org/openstack/nova\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "nova/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\n{{ if .Values.bootstrap.structured.flavors.enabled }}\n{{- range $i, $params := .Values.bootstrap.structured.flavors.options }}\n{\nopenstack flavor show {{ $params.name }} || \\\n  openstack flavor create \\\n  {{- range $key, $val := $params }}\n    {{- if ne $key \"name\" }}\n      {{- if eq $key \"extra_specs\" }}\n        {{- if kindIs \"slice\" $val }}\n          {{- range $idx, $spec := $val }}\n  --property {{ $spec }} \\\n          {{- end }}\n        {{- end }}\n      {{- else if eq $key \"is_public\" }}\n        {{- if $val }}\n  --public \\\n        {{- else if not $val }}\n  --private \\\n        {{- end }}\n      {{- else }}\n  --{{ $key }} {{ $val }} \\\n      {{- end }}\n    {{- end }}\n  {{- end }}\n  {{ $params.name }}\n} &\n{{ end }}\nwait\n{{ end }}\n\n{{ if .Values.bootstrap.wait_for_computes.enabled }}\n{{ .Values.bootstrap.wait_for_computes.scripts.wait_script }}\n{{ else }}\necho 'Wait for Computes script not enabled'\n{{ end }}\n\n{{ .Values.bootstrap.script | default \"echo 'No other bootstrap customizations found.'\" }}\n"
  },
  {
    "path": "nova/templates/bin/_cell-setup-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{- if .Values.jobs.cell_setup.extended_wait.enabled }}\niteration={{ .Values.jobs.cell_setup.extended_wait.iteration }}\nduration={{ .Values.jobs.cell_setup.extended_wait.duration }}\nextra_wait=true\n# Init for case wait_for_computes is not enabled. It'll have\n# the same effect as the original code that checks for at\n# least one compute is registered\nexpected_computes=1\n\nif [[ -f /tmp/compute_nodes.txt ]]\nthen\n  expected_computes=$(cat /tmp/compute_nodes.txt | wc -w)\nfi\n\nwhile [[ \"$extra_wait\" == true ]]\ndo\n  nova_computes=$(openstack compute service list --service nova-compute -f value -c State)\n\n  if [[ -z \"$(echo $nova_computes | grep down)\" ]]\n  then\n    # No more down. 
Although all present computes are up,\n    # the number of present computes may not be the total\n    # expected number of computes as some of the remaining\n    # computes may take a bit longer to register/join.\n    actual_computes=$(echo $nova_computes | wc -w)\n    if [[ \"$actual_computes\" -ge \"$expected_computes\" ]]\n    then\n      # All expected nodes are up\n      extra_wait=false\n    fi\n  fi\n\n  if [[ \"$extra_wait\" == true ]]\n  then\n    sleep \"$duration\"\n\n    if [[ \"$iteration\" -gt 1 ]]\n    then\n      ((iteration=iteration-1))\n    else\n      extra_wait=false\n\n      # List out the info to see whether any nodes is still down\n      openstack compute service list --service nova-compute\n    fi\n  fi\ndone\n{{- end }}\n\nuntil openstack compute service list --service nova-compute -f value -c State | grep -q \"^up$\" ;do\n  echo \"Waiting for Nova Compute processes to register\"\n  sleep 10\ndone\n\n{{- if .Values.jobs.cell_setup.extra_command }}\n{{ .Values.jobs.cell_setup.extra_command }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/bin/_cell-setup.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nNOVA_VERSION=$(nova-manage --version 2>&1 | grep -Eo '[0-9]+[.][0-9]+[.][0-9]+')\n\n# NOTE(portdirect): check if nova fully supports cells v2, and manage\n# accordingly. Support was complete in ocata (V14.x.x).\n\nif [ \"${NOVA_VERSION%%.*}\" -gt \"14\" ]; then\n  nova-manage cell_v2 discover_hosts --verbose\nfi\n"
  },
  {
    "path": "nova/templates/bin/_ceph-admin-keyring.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncat > /etc/ceph/ceph.client.admin.keyring << EOF\n[client.admin]\n{{- if .Values.conf.ceph.admin_keyring }}\n    key = {{ .Values.conf.ceph.admin_keyring }}\n{{- else }}\n    key = $(cat /tmp/client-keyring)\n{{- end }}\nEOF\n\nexit 0\n"
  },
  {
    "path": "nova/templates/bin/_ceph-keyring.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\ncp -vf /etc/ceph/ceph.conf.template /etc/ceph/ceph.conf\n\nKEYRING=/etc/ceph/ceph.client.${CEPH_CINDER_USER}.keyring\n{{- if .Values.conf.ceph.cinder.keyring }}\ncat > ${KEYRING} <<EOF\n[client.{{ .Values.conf.ceph.cinder.user }}]\n    key = {{ .Values.conf.ceph.cinder.keyring }}\nEOF\n{{- else }}\nif ! [ \"x${CEPH_CINDER_USER}\" == \"xadmin\" ]; then\n  #\n  # If user is not client.admin, check if it already exists. If not create\n  # the user. If the cephx user does not exist make sure the caps are set\n  # according to best practices\n  #\n  if USERINFO=$(ceph auth get client.${CEPH_CINDER_USER}); then\n    echo \"Cephx user client.${CEPH_CINDER_USER} already exist\"\n    echo \"Update user client.${CEPH_CINDER_USER} caps\"\n    ceph auth caps client.${CEPH_CINDER_USER} \\\n       mon \"profile rbd\" \\\n       osd \"profile rbd\"\n    ceph auth get client.${CEPH_CINDER_USER} -o ${KEYRING}\n  else\n    echo \"Creating Cephx user client.${CEPH_CINDER_USER}\"\n    ceph auth get-or-create client.${CEPH_CINDER_USER} \\\n      mon \"profile rbd\" \\\n      osd \"profile rbd\" \\\n      -o ${KEYRING}\n  fi\n  rm -f /etc/ceph/ceph.client.admin.keyring\nfi\n{{- end }}\n"
  },
  {
    "path": "nova/templates/bin/_db-archive-deleted-row.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec nova-manage db archive_deleted_rows \\\n{{- if .Values.conf.archive_deleted_rows.until_completion }}\n   --until-complete \\\n{{- end}}\n{{- if .Values.conf.archive_deleted_rows.purge_deleted_rows }}\n   --purge \\\n{{- end }}\n{{- if .Values.conf.archive_deleted_rows.all_cells }}\n   --all-cells \\\n{{- end}}\n{{- if .Values.conf.archive_deleted_rows.max_rows.enabled }}\n   --max_rows {{ .Values.conf.archive_deleted_rows.max_rows.rows }} \\\n{{- end }}\n{{- if .Values.conf.archive_deleted_rows.before.enabled }}\n   --before \"{{ .Values.conf.archive_deleted_rows.before.date }}\" \\\n{{- end }}\n   --verbose\n"
  },
  {
    "path": "nova/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nNOVA_VERSION=$(nova-manage --version 2>&1 | grep -Eo '[0-9]+[.][0-9]+[.][0-9]+')\n\nfunction manage_cells () {\n  # NOTE(portdirect): check if nova fully supports cells v2, and manage\n  # accordingly. Support was complete in ocata (V14.x.x).\n  if [ \"${NOVA_VERSION%%.*}\" -gt \"14\" ]; then\n    nova-manage cell_v2 map_cell0\n    nova-manage cell_v2 list_cells | grep -q \" cell1 \" || \\\n      nova-manage cell_v2 create_cell --name=cell1 --verbose\n\n    CELL0_ID=$(nova-manage cell_v2 list_cells | awk -F '|' '/ cell0 / { print $3 }' | tr -d ' ')\n    CELL1_ID=$(nova-manage cell_v2 list_cells | awk -F '|' '/ cell1 / { print $3 }' | tr -d ' ')\n    set +x\n\n    CELL0_TRANSPORT=$(nova-manage cell_v2 list_cells | awk -F '|' '/ cell0 / { print $4 }' | tr -d ' ')\n    if [ -z \"${DB_CONNECTION_CELL0}\" ]; then\n      echo \"ERROR: missing DB_CONNECTION_CELL0\"\n      exit 1\n    fi\n    nova-manage cell_v2 update_cell \\\n      --cell_uuid=\"${CELL0_ID}\" \\\n      --name=\"cell0\" \\\n      --transport-url=\"${CELL0_TRANSPORT}\" \\\n      --database_connection=\"${DB_CONNECTION_CELL0}\"\n\n    for VAR in TRANSPORT_URL DB_CONNECTION; do\n      if [ -z \"${!VAR}\" ]; then\n        echo \"ERROR: missing $VAR variable\"\n        exit 1\n      fi\n    done\n    nova-manage cell_v2 update_cell \\\n      --cell_uuid=\"${CELL1_ID}\" \\\n      --name=\"cell1\" \\\n      
--transport-url=\"${TRANSPORT_URL}\" \\\n      --database_connection=\"${DB_CONNECTION}\"\n    set -x\n  fi\n}\n\nnova-manage api_db sync\nmanage_cells\n\nnova-manage db sync\n\nnova-manage db online_data_migrations\n\necho 'Finished DB migrations'\n"
  },
  {
    "path": "nova/templates/bin/_fake-iptables.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nexit 0\n"
  },
  {
    "path": "nova/templates/bin/_health-probe.py.tpl",
    "content": "#!/usr/bin/env python\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nHealth probe script for OpenStack service that uses RPC/unix domain socket for\ncommunication. Check's the RPC tcp socket status on the process and send\nmessage to service through rpc call method and expects a reply.\nUse nova's ping method that is designed just for such simple purpose.\n\nScript returns failure to Kubernetes only when\n  a. TCP socket for the RPC communication are not established.\n  b. service is not reachable or\n  c. 
service times out sending a reply.\n\nsys.stderr.write() writes to pod's events on failures.\n\nUsage example for Nova Compute:\n# python health-probe.py \\\n#  --config-file /etc/nova/nova.conf \\\n#  --config-dir /etc/nova/nova.conf.d \\\n#  --service-queue-name compute\n\n\"\"\"\n\nimport json\nimport os\nimport psutil\nimport signal\nimport socket\nimport sys\n\nfrom oslo_config import cfg\nfrom oslo_context import context\nfrom oslo_db import options as oslo_db_options\nfrom oslo_db.sqlalchemy import utils as oslo_db_utils\nfrom oslo_log import log\nimport oslo_messaging\n\nrpc_timeout = int(os.getenv('RPC_PROBE_TIMEOUT', '60'))\nrpc_retries = int(os.getenv('RPC_PROBE_RETRIES', '2'))\n\ntcp_established = \"ESTABLISHED\"\n\n\ndef _get_hostname(topic, use_fqdn):\n    configured_host = cfg.CONF.host\n    if configured_host:\n        return configured_host\n    if use_fqdn and topic == \"compute\":\n        return socket.getfqdn()\n    return socket.gethostname()\n\n\ndef check_service_status(transport):\n    \"\"\"Verify service status. 
Return success if service consumes message\"\"\"\n    try:\n        service_queue_name = cfg.CONF.service_queue_name\n        use_fqdn = cfg.CONF.use_fqdn\n        target = oslo_messaging.Target(\n            topic=service_queue_name,\n            server=_get_hostname(service_queue_name, use_fqdn),\n            namespace='baseapi',\n            version=\"1.1\")\n        if hasattr(oslo_messaging, 'get_rpc_client'):\n            client = oslo_messaging.get_rpc_client(transport, target,\n                                                   timeout=rpc_timeout,\n                                                   retry=rpc_retries)\n        else:\n            client = oslo_messaging.RPCClient(transport, target,\n                                              timeout=rpc_timeout,\n                                              retry=rpc_retries)\n        client.call(context.RequestContext(),\n                    'ping',\n                    arg=None)\n    except oslo_messaging.exceptions.MessageDeliveryFailure:\n        # Log to pod events\n        sys.stderr.write(\"Health probe unable to reach message bus\")\n        sys.exit(0)  # return success\n    except oslo_messaging.rpc.client.RemoteError as re:\n        message = getattr(re, \"message\", str(re))\n        if (\"Endpoint does not support RPC method\" in message) or \\\n                (\"Endpoint does not support RPC version\" in message):\n            sys.exit(0)  # Call reached the service\n        else:\n            sys.stderr.write(\"Health probe unable to reach service\")\n            sys.exit(1)  # return failure\n    except oslo_messaging.exceptions.MessagingTimeout:\n        sys.stderr.write(\"Health probe timed out. 
Agent is down or response \"\n                         \"timed out\")\n        sys.exit(1)  # return failure\n    except Exception as ex:\n        message = getattr(ex, \"message\", str(ex))\n        sys.stderr.write(\"Health probe caught exception sending message to \"\n                         \"service: %s\" % message)\n        sys.exit(0)\n    except:\n        sys.stderr.write(\"Health probe caught exception sending message to\"\n                         \" service\")\n        sys.exit(0)\n\n    finally:\n        if transport:\n            transport.cleanup()\n\n\ndef tcp_socket_status(process, ports):\n    \"\"\"Check the tcp socket status on a process\"\"\"\n    for p in psutil.process_iter():\n        try:\n            with p.oneshot():\n                if process in \" \".join(p.cmdline()):\n                    pcon = p.net_connections()\n                    for con in pcon:\n                        try:\n                            rport = con.raddr[1]\n                            status = con.status\n                        except IndexError:\n                            continue\n                        if rport in ports and status == tcp_established:\n                            return 1\n        except psutil.Error:\n            continue\n    return 0\n\n\ndef configured_port_in_conf():\n    \"\"\"Get the rabbitmq/Database port configured in config file\"\"\"\n\n    rabbit_ports = set()\n    database_ports = set()\n\n    try:\n        transport_url = oslo_messaging.TransportURL.parse(cfg.CONF)\n        for host in transport_url.hosts:\n            rabbit_ports.add(host.port or 5672)\n    except Exception as ex:\n        message = getattr(ex, \"message\", str(ex))\n        sys.stderr.write(\"Health probe caught exception reading \"\n                         \"RabbitMQ ports: %s\" % message)\n        sys.exit(0)  # return success\n\n    try:\n        for section in ['database', 'api_database', 'cell0_database']:\n            group = getattr(cfg.CONF, 
section)\n            if not group.connection:\n                continue\n            connection = oslo_db_utils.make_url(group.connection)\n            if connection.port:\n                database_ports.add(int(connection.port))\n    except Exception as ex:\n        message = getattr(ex, \"message\", str(ex))\n        sys.stderr.write(\"Health probe caught exception reading \"\n                         \"database ports: %s\" % message)\n        sys.exit(0)  # return success\n\n    return rabbit_ports, database_ports\n\n\ndef test_tcp_socket(service):\n    \"\"\"Check tcp socket to rabbitmq/db is in Established state\"\"\"\n    dict_services = {\n        \"compute\": \"nova-compute\",\n        \"conductor\": \"nova-conductor\",\n        \"scheduler\": \"nova-scheduler\"\n    }\n    r_ports, d_ports = configured_port_in_conf()\n\n    if service in dict_services:\n        proc = dict_services[service]\n        transport = oslo_messaging.TransportURL.parse(cfg.CONF)\n        if r_ports and tcp_socket_status(proc, r_ports) == 0:\n            sys.stderr.write(\"RabbitMQ socket not established for service \"\n                             \"%s with transport %s\" % (proc, transport))\n            # Do not kill the pod if RabbitMQ is not reachable/down\n            if not cfg.CONF.liveness_probe:\n                sys.exit(1)\n\n        # let's do the db check\n        if service != \"compute\":\n            if d_ports and tcp_socket_status(proc, d_ports) == 0:\n                sys.stderr.write(\"Database socket not established for service \"\n                                 \"%s with transport %s\" % (proc, transport))\n                # Do not kill the pod if database is not reachable/down\n                # there could be no socket as well as typically connections\n                # get closed after an idle timeout\n                # Just log it to pod events\n                if not cfg.CONF.liveness_probe:\n                    sys.exit(1)\n\n\ndef 
test_rpc_liveness():\n    \"\"\"Test if service can consume message from queue\"\"\"\n    oslo_messaging.set_transport_defaults(control_exchange='nova')\n\n    rabbit_group = cfg.OptGroup(name='oslo_messaging_rabbit',\n                                title='RabbitMQ options')\n    cfg.CONF.register_group(rabbit_group)\n    cfg.CONF.register_cli_opt(cfg.StrOpt('service-queue-name'))\n    cfg.CONF.register_cli_opt(cfg.BoolOpt('liveness-probe', default=False,\n                                          required=False))\n    cfg.CONF.register_cli_opt(cfg.BoolOpt('use-fqdn', default=False,\n                                          required=False))\n\n    # Opts need to be registered to be accessible by this script.\n    cfg.CONF.register_opt(cfg.StrOpt('host'))\n    cfg.CONF.register_opts(oslo_db_options.database_opts, 'database')\n    cfg.CONF.register_opts(oslo_db_options.database_opts, 'api_database')\n    # cell0_database is an OSH specific section used by db-init and db-drop Job.\n    # It is not an official Nova configuration section.\n    cfg.CONF.register_opts(oslo_db_options.database_opts, 'cell0_database')\n\n    cfg.CONF(sys.argv[1:], project='nova')\n\n    log.logging.basicConfig(level=log.{{ .Values.health_probe.logging.level }})\n\n    try:\n        transport = oslo_messaging.get_rpc_transport(cfg.CONF)\n    except Exception as ex:\n        message = getattr(ex, \"message\", str(ex))\n        sys.stderr.write(\"Message bus driver load error: %s\" % message)\n        sys.exit(0)  # return success\n\n    if not cfg.CONF.transport_url or \\\n            not cfg.CONF.service_queue_name:\n        sys.stderr.write(\"Both message bus URL and service's queue name are \"\n                         \"required for health probe to work\")\n        sys.exit(0)  # return success\n\n    try:\n        cfg.CONF.set_override('rabbit_max_retries', 2,\n                              group=rabbit_group)  # 3 attempts\n    except cfg.NoSuchOptError as ex:\n        
cfg.CONF.register_opt(cfg.IntOpt('rabbit_max_retries', default=2),\n                              group=rabbit_group)\n\n    service = cfg.CONF.service_queue_name\n    test_tcp_socket(service)\n\n    check_service_status(transport)\n\ndef check_pid_running(pid):\n    if psutil.pid_exists(int(pid)):\n        return True\n    else:\n        return False\n\nif __name__ == \"__main__\":\n\n    if \"liveness-probe\" in ','.join(sys.argv):\n        pidfile = \"/tmp/liveness.pid\"  #nosec\n    else:\n        pidfile = \"/tmp/readiness.pid\"  #nosec\n    data = {}\n    if os.path.isfile(pidfile):\n        with open(pidfile,'r') as f:\n            file_content = f.read().strip()\n            if file_content:\n                data = json.loads(file_content)\n\n    if 'pid' in data and check_pid_running(data['pid']):\n        if 'exit_count' in data and data['exit_count'] > 1:\n            # Third time in, kill the previous process\n            os.kill(int(data['pid']), signal.SIGTERM)\n        else:\n            data['exit_count'] = data.get('exit_count', 0) + 1\n            with open(pidfile, 'w') as f:\n                json.dump(data, f)\n            sys.exit(0)\n    data['pid'] = os.getpid()\n    data['exit_count'] = 0\n    with open(pidfile, 'w') as f:\n        json.dump(data, f)\n\n    test_rpc_liveness()\n\n    sys.exit(0)  # return success\n"
  },
  {
    "path": "nova/templates/bin/_iscsiadm.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2020 The Openstack-Helm Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nchroot /mnt/host-rootfs /usr/bin/env -i PATH=\"/sbin:/bin:/usr/bin\" \\\n      iscsiadm \"${@:1}\"\n"
  },
  {
    "path": "nova/templates/bin/_multipath.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nchroot /mnt/host-rootfs /usr/bin/env -i PATH=\"/sbin:/bin:/usr/bin\" \\\n      multipath \"${@:1}\"\n"
  },
  {
    "path": "nova/templates/bin/_multipathd.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nchroot /mnt/host-rootfs /usr/bin/env -i PATH=\"/sbin:/bin:/usr/bin\" \\\n      multipathd \"${@:1}\"\n"
  },
  {
    "path": "nova/templates/bin/_nova-api-metadata-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nmetadata_ip=\"{{- .Values.endpoints.compute_metadata.ip.ingress -}}\"\nif [ -z \"${metadata_ip}\" ] ; then\n    metadata_ip=$(getent hosts metadata | awk '{print $1}')\nfi\n\ncat <<EOF>/tmp/pod-shared/nova-api-metadata.ini\n[DEFAULT]\nmetadata_host=$metadata_ip\nEOF\n"
  },
  {
    "path": "nova/templates/bin/_nova-api-metadata.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n{{- if .Values.manifests.certificates }}\n  if [ -f /etc/apache2/envvars ]; then\n    # Loading Apache2 ENV variables\n    source /etc/apache2/envvars\n    mkdir -p ${APACHE_RUN_DIR}\n  fi\n\n{{- if .Values.conf.software.apache2.a2enmod }}\n  {{- range .Values.conf.software.apache2.a2enmod }}\n  a2enmod {{ . }}\n  {{- end }}\n{{- end }}\n\n{{- if .Values.conf.software.apache2.a2dismod }}\n  {{- range .Values.conf.software.apache2.a2dismod }}\n  a2dismod {{ . }}\n  {{- end }}\n{{- end }}\n\n  if [ -f /var/run/apache2/apache2.pid ]; then\n    # Remove the stale pid for debian/ubuntu images\n    rm -f /var/run/apache2/apache2.pid\n  fi\n  # Starts Apache2\n  exec {{ .Values.conf.software.apache2.binary }} {{ .Values.conf.software.apache2.start_parameters }}\n{{- else }}\n  exec uwsgi --ini /etc/nova/nova-metadata-uwsgi.ini\n{{- end }}\n}\n\nfunction stop () {\n{{- if .Values.manifests.certificates }}\n  if [ -f /etc/apache2/envvars ]; then\n    source /etc/apache2/envvars\n  fi\n  {{ .Values.conf.software.apache2.binary }} -k graceful-stop\n{{- else }}\n  kill -TERM 1\n{{- end }}\n}\n\n$COMMAND\n"
  },
  {
    "path": "nova/templates/bin/_nova-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n{{- if .Values.manifests.certificates }}\n  if [ -f /etc/apache2/envvars ]; then\n    # Loading Apache2 ENV variables\n    source /etc/apache2/envvars\n    mkdir -p ${APACHE_RUN_DIR}\n  fi\n\n{{- if .Values.conf.software.apache2.a2enmod }}\n  {{- range .Values.conf.software.apache2.a2enmod }}\n  a2enmod {{ . }}\n  {{- end }}\n{{- end }}\n\n{{- if .Values.conf.software.apache2.a2dismod }}\n  {{- range .Values.conf.software.apache2.a2dismod }}\n  a2dismod {{ . }}\n  {{- end }}\n{{- end }}\n\n\n  if [ -f /var/run/apache2/apache2.pid ]; then\n    # Remove the stale pid for debian/ubuntu images\n    rm -f /var/run/apache2/apache2.pid\n  fi\n  # Starts Apache2\n  exec {{ .Values.conf.software.apache2.binary }} {{ .Values.conf.software.apache2.start_parameters }}\n{{- else }}\n  exec uwsgi --ini /etc/nova/nova-api-uwsgi.ini\n{{- end }}\n}\n\nfunction stop () {\n{{- if .Values.manifests.certificates }}\n  if [ -f /etc/apache2/envvars ]; then\n    source /etc/apache2/envvars\n  fi\n  {{ .Values.conf.software.apache2.binary }} -k graceful-stop\n{{- else }}\n  kill -TERM 1\n{{- end }}\n}\n\n$COMMAND\n"
  },
  {
    "path": "nova/templates/bin/_nova-compute-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{- if and .Values.hosts_uuids (not .Values.manifests.compute_uuid_self_provisioning) }}\n# Extract Host's uuid from helm chart and save it to the compute_id file\n  {{- range $host := .Values.hosts_uuids }}\nhostname=\"{{- $host.name}}\"\nif [ \"$hostname\" == $HOSTNAME ]; then\n  echo \"{{ $host.uuid }}\" > {{ $.Values.conf.nova.DEFAULT.state_path }}/compute_id\nfi\n  {{- end }}\n{{- end }}\n\n# Make the Nova Instances Dir as this is not autocreated.\nmkdir -p /var/lib/nova/instances\n\n# Set Ownership of nova dirs to the nova user\nchown ${NOVA_USER_UID} /var/lib/nova /var/lib/nova/instances\n\nmigration_interface=\"{{- .Values.conf.libvirt.live_migration_interface -}}\"\nif [[ -z $migration_interface ]]; then\n    # search for interface with default routing\n    # If there is not default gateway, exit\n    migration_network_cidr=\"{{- .Values.conf.libvirt.live_migration_network_cidr -}}\"\n    if [ -z \"${migration_network_cidr}\" ] ; then\n        migration_network_cidr=\"0/0\"\n    fi\n    migration_interface=$(ip -4 route list ${migration_network_cidr} | awk -F 'dev' '{ print $2; exit }' | awk '{ print $1 }') || exit 1\nfi\n\nmigration_address=$(ip a s $migration_interface | grep 'inet ' | awk '{print $2}' | awk -F \"/\" '{print $1}' | head -1)\n\nif [ -z \"${migration_address}\" ] ; then\n  echo \"Var live_migration_interface is empty\"\n  exit 1\nfi\n\ntee > 
/tmp/pod-shared/nova-libvirt.conf << EOF\n[libvirt]\nlive_migration_inbound_addr = $migration_address\nEOF\n\nhypervisor_interface=\"{{- .Values.conf.hypervisor.host_interface -}}\"\nif [[ -z $hypervisor_interface ]]; then\n    # search for interface with default routing\n    # If there is not default gateway, exit\n    hypervisor_network_cidr=\"{{- .Values.conf.hypervisor.host_network_cidr -}}\"\n    if [ -z \"${hypervisor_network_cidr}\" ] ; then\n        hypervisor_network_cidr=\"0/0\"\n    fi\n    hypervisor_interface=$(ip -4 route list ${hypervisor_network_cidr} | awk -F 'dev' '{ print $2; exit }' | awk '{ print $1 }') || exit 1\nfi\n\nhypervisor_address=$(ip a s $hypervisor_interface | grep 'inet ' | awk '{print $2}' | awk -F \"/\" '{print $1}' | head -1)\n\nif [ -z \"${hypervisor_address}\" ] ; then\n  echo \"Var my_ip is empty\"\n  exit 1\nfi\n\ntee > /tmp/pod-shared/nova-hypervisor.conf << EOF\n[DEFAULT]\nmy_ip  = $hypervisor_address\nEOF\n\n{{- if and ( empty .Values.conf.nova.DEFAULT.host ) ( .Values.pod.use_fqdn.compute ) }}\ntee > /tmp/pod-shared/nova-compute-fqdn.conf << EOF\n[DEFAULT]\nhost = $(hostname --fqdn)\nEOF\n{{- end }}\n"
  },
  {
    "path": "nova/templates/bin/_nova-compute-ironic.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec nova-compute \\\n      --config-file /etc/nova/nova-compute.conf \\\n      --config-file /etc/nova/nova-ironic.conf \\\n      --config-dir /etc/nova/nova.conf.d\n"
  },
  {
    "path": "nova/templates/bin/_nova-compute.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec nova-compute \\\n      --config-file /etc/nova/nova.conf \\\n{{- if .Values.console.address_search_enabled }}\n      --config-file /tmp/pod-shared/nova-console.conf \\\n{{- end }}\n{{- if .Values.conf.libvirt.address_search_enabled }}\n      --config-file /tmp/pod-shared/nova-libvirt.conf \\\n{{- end }}\n{{- if and ( empty .Values.conf.nova.DEFAULT.host ) ( .Values.pod.use_fqdn.compute ) }}\n      --config-file /tmp/pod-shared/nova-compute-fqdn.conf \\\n{{- end }}\n{{- if .Values.conf.hypervisor.address_search_enabled }}\n      --config-file /tmp/pod-shared/nova-hypervisor.conf \\\n{{- end }}\n      --config-dir /etc/nova/nova.conf.d\n"
  },
  {
    "path": "nova/templates/bin/_nova-conductor.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\n\nexec nova-conductor \\\n      --config-file /etc/nova/nova.conf \\\n      --config-dir /etc/nova/nova.conf.d\n"
  },
  {
    "path": "nova/templates/bin/_nova-console-compute-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nconsole_kind=\"{{- .Values.console.console_kind -}}\"\n\nif [ \"${console_kind}\" == \"novnc\" ] ; then\n    client_address=\"{{- .Values.conf.nova.vnc.server_proxyclient_address -}}\"\n    client_interface=\"{{- .Values.console.novnc.compute.vncserver_proxyclient_interface -}}\"\n    client_network_cidr=\"{{- .Values.console.novnc.compute.vncserver_proxyclient_network_cidr -}}\"\n    listen_ip=\"{{- .Values.conf.nova.vnc.server_listen -}}\"\nelif [ \"${console_kind}\" == \"spice\" ] ; then\n    client_address=\"{{- .Values.conf.nova.spice.server_proxyclient_address -}}\"\n    client_interface=\"{{- .Values.console.spice.compute.server_proxyclient_interface -}}\"\n    client_network_cidr=\"{{- .Values.console.spice.compute.server_proxyclient_network_cidr -}}\"\n    listen_ip=\"{{- .Values.conf.nova.spice.server_listen -}}\"\nelif [ \"${console_kind}\" == \"serial\" ] ; then\n    client_address=\"{{- .Values.conf.nova.serial_console.proxyclient_address -}}\"\n    client_interface=\"{{- .Values.console.serial.compute.server_proxyclient_interface -}}\"\n    client_network_cidr=\"{{- .Values.console.serial.compute.server_proxyclient_network_cidr -}}\"\nfi\n\nif [ -z \"${client_address}\" ] ; then\n    if [ -z \"${client_interface}\" ] ; then\n        if [ -z \"${client_network_cidr}\" ] ; then\n            client_network_cidr=\"0/0\"\n        fi\n        client_interface=$(ip -4 route 
list ${client_network_cidr} | awk -F 'dev' '{ print $2; exit }' | awk '{ print $1 }') || exit 1\n    fi\n\n    # determine client ip dynamically based on interface provided\n    client_address=$(ip a s $client_interface | grep 'inet ' | awk '{print $2}' | awk -F \"/\" '{print $1}' | head -1)\nfi\n\nif [ -z \"${listen_ip}\" ] ; then\n    # The server component listens on all IP addresses and the proxy component\n    # only listens on the management interface IP address of the compute node.\n    listen_ip=0.0.0.0\nfi\n\ntouch /tmp/pod-shared/nova-console.conf\nif [ \"${console_kind}\" == \"novnc\" ] ; then\n  cat > /tmp/pod-shared/nova-console.conf <<EOF\n[vnc]\nserver_proxyclient_address = $client_address\nserver_listen = $listen_ip\nEOF\nelif [ \"${console_kind}\" == \"spice\" ] ; then\n  cat > /tmp/pod-shared/nova-console.conf <<EOF\n[spice]\nserver_proxyclient_address = $client_address\nserver_listen = $listen_ip\nEOF\nelif [ \"${console_kind}\" == \"serial\" ] ; then\n  cat > /tmp/pod-shared/nova-console.conf <<EOF\n[serial_console]\nproxyclient_address = $client_address\nEOF\nfi\n"
  },
  {
    "path": "nova/templates/bin/_nova-console-proxy-init-assets.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nconsole_kind=\"{{- .Values.console.console_kind -}}\"\nif [ \"${console_kind}\" == \"novnc\" ] ; then\n    cp -vaRf /usr/share/novnc/* /tmp/usr/share/novnc/\nelif [ \"${console_kind}\" == \"spice\" ] ; then\n    cp -vaRf /usr/share/spice-html5/* /tmp/usr/share/spice-html5/\nfi\n"
  },
  {
    "path": "nova/templates/bin/_nova-console-proxy-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nconsole_kind=\"{{- .Values.console.console_kind -}}\"\n\nif [ \"${console_kind}\" == \"novnc\" ] ; then\n    client_address=\"{{- .Values.conf.nova.vnc.server_proxyclient_address -}}\"\n    client_interface=\"{{- .Values.console.novnc.vncproxy.vncserver_proxyclient_interface -}}\"\n    client_network_cidr=\"{{- .Values.console.novnc.vncproxy.vncserver_proxyclient_network_cidr -}}\"\n    listen_ip=\"{{- .Values.conf.nova.vnc.server_listen -}}\"\nelif [ \"${console_kind}\" == \"spice\" ] ; then\n    client_address=\"{{- .Values.conf.nova.spice.server_proxyclient_address -}}\"\n    client_interface=\"{{- .Values.console.spice.proxy.server_proxyclient_interface -}}\"\n    client_network_cidr=\"{{- .Values.console.spice.proxy.server_proxyclient_network_cidr -}}\"\n    listen_ip=\"{{- .Values.conf.nova.spice.server_listen -}}\"\nelif [ \"${console_kind}\" == \"serial\" ] ; then\n    client_address=\"{{- .Values.conf.nova.serial_console.proxyclient_address -}}\"\n    client_interface=\"{{- .Values.console.serial.proxy.server_proxyclient_interface -}}\"\n    client_network_cidr=\"{{- .Values.console.serial.proxy.server_proxyclient_network_cidr -}}\"\nfi\n\nif [ -z \"${client_address}\" ] ; then\n    if [ -z \"${client_interface}\" ] ; then\n        if [ -z \"${client_network_cidr}\" ] ; then\n            client_network_cidr=\"0/0\"\n        fi\n        client_interface=$(ip -4 route list 
${client_network_cidr} | awk -F 'dev' '{ print $2; exit }' | awk '{ print $1 }') || exit 1\n    fi\n\n    # determine client ip dynamically based on interface provided\n    client_address=$(ip a s $client_interface | grep 'inet ' | awk '{print $2}' | awk -F \"/\" '{print $1}' | head -1)\nfi\n\nif [ -z \"${listen_ip}\" ] ; then\n    listen_ip=$client_address\nfi\n\nif [ \"${console_kind}\" == \"novnc\" ] ; then\ncat <<EOF>/tmp/pod-shared/nova-vnc.ini\n[vnc]\nserver_proxyclient_address = $client_address\nserver_listen = $listen_ip\nnovncproxy_host = $listen_ip\nEOF\nelif [ \"${console_kind}\" == \"spice\" ] ; then\ncat <<EOF>/tmp/pod-shared/nova-spice.ini\n[spice]\nserver_proxyclient_address = $client_address\nserver_listen = $listen_ip\nEOF\nelif [ \"${console_kind}\" == \"serial\" ] ; then\ncat <<EOF>/tmp/pod-shared/nova-serial.ini\n[serial_console]\nproxyclient_address = $client_address\nEOF\nfi\n"
  },
  {
    "path": "nova/templates/bin/_nova-console-proxy.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\n\nconsole_kind=\"{{- .Values.console.console_kind -}}\"\nif [ \"${console_kind}\" == \"novnc\" ] ; then\n    exec nova-novncproxy \\\n        --config-file /etc/nova/nova.conf \\\n        --config-file /tmp/pod-shared/nova-vnc.ini \\\n        --config-dir /etc/nova/nova.conf.d\nelif [ \"${console_kind}\" == \"spice\" ] ; then\n    exec nova-spicehtml5proxy\\\n        --config-file /etc/nova/nova.conf \\\n        --config-file /tmp/pod-shared/nova-spice.ini \\\n        --config-dir /etc/nova/nova.conf.d\nelif [ \"${console_kind}\" == \"serial\" ] ; then\n    exec nova-serialproxy\\\n        --config-file /etc/nova/nova.conf \\\n        --config-file /tmp/pod-shared/nova-serial.ini \\\n        --config-dir /etc/nova/nova.conf.d\nfi\n"
  },
  {
    "path": "nova/templates/bin/_nova-scheduler.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -xe\n\nexec nova-scheduler \\\n      --config-file /etc/nova/nova.conf \\\n      --config-dir /etc/nova/nova.conf.d\n"
  },
  {
    "path": "nova/templates/bin/_nova-service-cleaner.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -xe\n\n# If any non-compute service is down, then sleep for 2 times the report_interval\n# to confirm service is still down.\nDISABLED_SVC=\"$(openstack compute service list -f value | grep -v 'nova-compute' | grep 'down' || true)\"\nif [ ! -z \"${DISABLED_SVC}\" ]; then\n  sleep {{ .Values.jobs.service_cleaner.sleep_time }}\nfi\n\nNOVA_SERVICES_TO_CLEAN=\"$(openstack compute service list -f value -c Binary | sort | uniq | grep -v '^nova-compute$')\"\nfor NOVA_SERVICE in ${NOVA_SERVICES_TO_CLEAN}; do\n  DEAD_SERVICE_IDS=$(openstack compute service list --service ${NOVA_SERVICE} -f json | jq -r '.[] | select(.State == \"down\") | .ID')\n  for SERVICE_ID in ${DEAD_SERVICE_IDS}; do\n    openstack compute service delete \"${SERVICE_ID}\"\n  done\ndone\n\n{{- if .Values.jobs.service_cleaner.extra_command }}\n{{ .Values.jobs.service_cleaner.extra_command }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/bin/_ssh-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexport NOVA_USERNAME=$(id -u ${NOVA_USER_UID} -n)\nexport NOVA_USER_HOME=$(eval echo ~${NOVA_USERNAME})\n\nmkdir -p ${NOVA_USER_HOME}/.ssh\n\ncat > ${NOVA_USER_HOME}/.ssh/config <<EOF\nHost *\n  StrictHostKeyChecking no\n  UserKnownHostsFile /dev/null\n  port $SSH_PORT\n  IdentitiesOnly yes\nEOF\n\ncp /tmp/nova-ssh/* ${NOVA_USER_HOME}/.ssh/\nchmod 600 ${NOVA_USER_HOME}/.ssh/id_rsa\nchown -R ${NOVA_USERNAME}:${NOVA_USERNAME} ${NOVA_USER_HOME}/.ssh\n"
  },
  {
    "path": "nova/templates/bin/_ssh-start.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nIFS=','\nfor KEY_TYPE in $KEY_TYPES; do\n    KEY_PATH=/etc/ssh/ssh_host_${KEY_TYPE}_key\n    if [[ ! -f \"${KEY_PATH}\" ]]; then\n        ssh-keygen -q -t ${KEY_TYPE} -f ${KEY_PATH} -N \"\"\n    fi\ndone\nIFS=''\n\nsubnet_address=\"{{- .Values.network.ssh.from_subnet -}}\"\n\nif [ -z \"${subnet_address}\" ] ; then\n    subnet_address=\"0.0.0.0/0\"\nfi\nlisten_interface=$(ip -4 route list ${subnet_address} | awk -F 'dev' '{ print $2; exit }' | awk '{ print $1 }') || exit 1\nlisten_address=$(ip a s $listen_interface | grep 'inet ' | awk '{print $2}' | awk -F \"/\" '{print $1}' | head -1)\n\ncat > /tmp/sshd_config_extend <<EOF\nListenAddress $listen_address\nPasswordAuthentication no\nMatch Address $subnet_address\n    PermitRootLogin without-password\nEOF\ncat /tmp/sshd_config_extend >> /etc/ssh/sshd_config\n\nrm /tmp/sshd_config_extend\n\nmkdir -p /run/sshd\n\nexec /usr/sbin/sshd -D -e -o Port=$SSH_PORT\n"
  },
  {
    "path": "nova/templates/bin/_storage-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -x\nif [ \"x$STORAGE_BACKEND\" == \"xrbd\" ]; then\n  SECRET=$(mktemp --suffix .yaml)\n  KEYRING=$(mktemp --suffix .keyring)\n  function cleanup {\n      rm -f ${SECRET} ${KEYRING}\n  }\n  trap cleanup EXIT\nfi\n\nset -ex\nif [ \"x$STORAGE_BACKEND\" == \"xrbd\" ]; then\n  ceph -s\n  function ensure_pool () {\n    ceph osd pool stats $1 || ceph osd pool create $1 $2\n    if [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then\n        ceph osd pool application enable $1 $3\n    fi\n    size_protection=$(ceph osd pool get $1 nosizechange | cut -f2 -d: | tr -d '[:space:]')\n    ceph osd pool set $1 nosizechange 0\n    ceph osd pool set $1 size ${RBD_POOL_REPLICATION}\n    ceph osd pool set $1 nosizechange ${size_protection}\n    ceph osd pool set $1 crush_rule \"${RBD_POOL_CRUSH_RULE}\"\n  }\n  ensure_pool ${RBD_POOL_NAME} ${RBD_POOL_CHUNK_SIZE} ${RBD_POOL_APP_NAME}\nfi"
  },
  {
    "path": "nova/templates/bin/_wait-for-computes-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{ .Values.bootstrap.wait_for_computes.scripts.init_script | default \"echo 'No wait-for-compute script configured'\" }}\n"
  },
  {
    "path": "nova/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (contains \"vencrypt\" .Values.conf.nova.vnc.auth_schemes) -}}\n{{ dict \"envAll\" . \"service\" \"compute_novnc_vencrypt\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end }}\n{{- if .Values.manifests.certificates -}}\n{{ dict \"envAll\" . \"service\" \"compute\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- if .Values.manifests.deployment_novncproxy }}\n{{ dict \"envAll\" . \"service\" \"compute_novnc_proxy\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end }}\n{{- if .Values.manifests.deployment_placement }}\n{{ dict \"envAll\" . \"service\" \"placement\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end }}\n{{  dict \"envAll\" . \"service\" \"compute_metadata\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- if .Values.manifests.deployment_spiceproxy }}\n{{ dict \"envAll\" . \"service\" \"compute_spice_proxy\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end }}\n{{- if .Values.manifests.deployment_serialproxy }}\n{{ dict \"envAll\" . \"service\" \"compute_serial_proxy\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end }}\n{{- end -}}\n"
  },
  {
    "path": "nova/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $rallyTests := .Values.conf.rally_tests }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: nova-bin\ndata:\n{{- if .Values.conf.enable_iscsi }}\n  iscsiadm: |\n{{ tuple \"bin/_iscsiadm.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  multipath: |\n{{ tuple \"bin/_multipath.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  multipathd: |\n{{ tuple \"bin/_multipathd.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  rally-test.sh: |\n{{ tuple $rallyTests | include \"helm-toolkit.scripts.rally_test\" | indent 4 }}\n  storage-init.sh: |\n{{ tuple \"bin/_storage-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . 
| indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n{{- if .Values.conf.ceph.enabled }}\n  ceph-keyring.sh: |\n{{ tuple \"bin/_ceph-keyring.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ceph-admin-keyring.sh: |\n{{ tuple \"bin/_ceph-admin-keyring.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n{{- if .Values.manifests.cron_job_archive_deleted_rows }}\n  archive-deleted-rows.sh: |\n{{ tuple \"bin/_db-archive-deleted-row.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  health-probe.py: |\n{{ tuple \"bin/_health-probe.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  nova-api.sh: |\n{{ tuple \"bin/_nova-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  nova-api-metadata.sh: |\n{{ tuple \"bin/_nova-api-metadata.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  nova-api-metadata-init.sh: |\n{{ tuple \"bin/_nova-api-metadata-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  nova-compute.sh: |\n{{ tuple \"bin/_nova-compute.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  nova-compute-init.sh: |\n{{ tuple \"bin/_nova-compute-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  nova-compute-ironic.sh: |\n{{ tuple \"bin/_nova-compute-ironic.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  nova-conductor.sh: |\n{{ tuple \"bin/_nova-conductor.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  nova-scheduler.sh: |\n{{ tuple \"bin/_nova-scheduler.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  fake-iptables.sh: |\n{{ tuple \"bin/_fake-iptables.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  nova-console-compute-init.sh: |\n{{ tuple \"bin/_nova-console-compute-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  nova-console-proxy.sh: |\n{{ tuple \"bin/_nova-console-proxy.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  nova-console-proxy-init.sh: |\n{{ tuple \"bin/_nova-console-proxy-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  nova-console-proxy-init-assets.sh: |\n{{ tuple \"bin/_nova-console-proxy-init-assets.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ssh-init.sh: |\n{{ tuple \"bin/_ssh-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ssh-start.sh: |\n{{ tuple \"bin/_ssh-start.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  cell-setup.sh: |\n{{ tuple \"bin/_cell-setup.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  cell-setup-init.sh: |\n{{ tuple \"bin/_cell-setup-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  nova-service-cleaner.sh: |\n{{ tuple \"bin/_nova-service-cleaner.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n  wait-for-computes-init.sh: |\n{{ tuple \"bin/_wait-for-computes-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- define \"nova.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if empty .Values.conf.nova.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.nova.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n{{- if empty .Values.conf.nova.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.nova.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.nova.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.nova.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.nova.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.nova.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if .Values.conf.nova.service_user.send_service_user_token -}}\n\n{{- if empty .Values.conf.nova.service_user.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.nova.service_user \"auth_url\" -}}\n{{- end -}}\n\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.nova.database.connection)) (empty .Values.conf.nova.database.connection) -}}\n{{- $connection := tuple \"oslo_db\" \"internal\" \"nova\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.nova.database \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.nova.database \"connection\" $connection -}}\n{{- end -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.nova.api_database.connection)) (empty .Values.conf.nova.api_database.connection) -}}\n{{- $connection := tuple \"oslo_db_api\" \"internal\" \"nova\" \"mysql\" . 
| include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.nova.api_database \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.nova.api_database \"connection\" $connection -}}\n{{- end -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.nova.cell0_database.connection)) (empty .Values.conf.nova.cell0_database.connection) -}}\n{{- $connection := tuple \"oslo_db_cell0\" \"internal\" \"nova\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.nova.cell0_database \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.nova.cell0_database \"connection\" $connection -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.nova.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"nova\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.nova.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.nova.glance.api_servers -}}\n{{- $_ := tuple \"image\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.nova.glance \"api_servers\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.nova.neutron.url -}}\n{{- $_ := tuple \"network\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.nova.neutron \"url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.nova.neutron.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.nova.neutron \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.nova.cache.memcache_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.nova.cache \"memcache_servers\" -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.nova.DEFAULT.metadata_host) .Values.endpoints.compute_metadata.ip.ingress -}}\n{{- $_ := set .Values.conf.nova.DEFAULT \"metadata_host\" .Values.endpoints.compute_metadata.ip.ingress -}}\n{{- end -}}\n\n{{- if empty .Values.conf.nova.DEFAULT.metadata_listen_port -}}\n{{- $_ := tuple \"compute_metadata\" \"internal\" \"metadata\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.nova.DEFAULT \"metadata_listen_port\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.nova.placement.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.nova.placement \"auth_url\" -}}\n{{- end -}}\n\n{{- if eq .Values.console.console_kind \"novnc\"}}\n{{- $_ := \"true\" | set .Values.conf.nova.vnc \"enabled\" -}}\n{{- if empty .Values.conf.nova.vnc.novncproxy_base_url -}}\n{{- $_ := tuple \"compute_novnc_proxy\" \"public\" \"novnc_proxy\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.nova.vnc \"novncproxy_base_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.nova.vnc.novncproxy_port -}}\n{{- $_ := tuple \"compute_novnc_proxy\" \"internal\" \"novnc_proxy\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.nova.vnc \"novncproxy_port\" -}}\n{{- end -}}\n{{- end -}}\n\n{{- if (contains \"vencrypt\" .Values.conf.nova.vnc.auth_schemes) -}}\n{{- if empty .Values.conf.nova.vnc.vencrypt_client_key }}\n{{- $_ := set $envAll.Values.conf.nova.vnc \"vencrypt_client_key\" \"/etc/pki/nova-novncproxy/tls.key\" -}}\n{{- end }}\n{{- if empty .Values.conf.nova.vnc.vencrypt_client_cert }}\n{{- $_ := set $envAll.Values.conf.nova.vnc \"vencrypt_client_cert\" \"/etc/pki/nova-novncproxy/tls.crt\" -}}\n{{- end }}\n{{- if empty .Values.conf.nova.vnc.vencrypt_ca_certs }}\n{{- $_ := set $envAll.Values.conf.nova.vnc \"vencrypt_ca_certs\" \"/etc/pki/nova-novncproxy/ca.crt\" -}}\n{{- end }}\n{{- end }}\n\n{{- if eq .Values.console.console_kind \"spice\"}}\n{{- $_ := \"false\" | set .Values.conf.nova.vnc \"enabled\" -}}\n{{- $_ := \"true\" | set .Values.conf.nova.spice \"enabled\" -}}\n{{- if empty .Values.conf.nova.spice.html5proxy_base_url -}}\n{{- $_ := tuple \"compute_spice_proxy\" \"public\" \"spice_proxy\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.nova.spice \"html5proxy_base_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.nova.spice.html5proxy_port -}}\n{{- $_ := tuple \"compute_spice_proxy\" \"internal\" \"spice_proxy\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.nova.spice \"html5proxy_port\" -}}\n{{- end -}}\n{{- end -}}\n\n{{- if eq .Values.console.console_kind \"serial\"}}\n{{- $_ := \"false\" | set .Values.conf.nova.vnc \"enabled\" -}}\n{{- $_ := \"false\" | set .Values.conf.nova.spice \"enabled\" -}}\n{{- $_ := \"true\" | set .Values.conf.nova.serial_console \"enabled\" -}}\n{{- if empty .Values.conf.nova.serial_console.base_url -}}\n{{- $_ := tuple \"compute_serial_proxy\" \"public\" \"serial_proxy\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.nova.serial_console \"base_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.nova.serial_console.serialproxy_port -}}\n{{- $_ := tuple \"compute_serial_proxy\" \"internal\" \"serial_proxy\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.nova.serial_console \"serialproxy_port\" -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.nova.ironic.api_endpoint -}}\n{{- $_ := tuple \"baremetal\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.nova.ironic \"api_endpoint\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.nova.ironic.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.nova.ironic \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.nova.ironic.auth_type -}}\n{{- $_ := set .Values.conf.nova.ironic \"auth_type\" .Values.endpoints.identity.auth.ironic.auth_type -}}\n{{- end -}}\n{{- if empty .Values.conf.nova.ironic.auth_version -}}\n{{- $_ := set .Values.conf.nova.ironic \"auth_version\" .Values.endpoints.identity.auth.ironic.auth_version -}}\n{{- end -}}\n\n{{- if .Values.conf.nova.cinder.auth_type -}}\n{{- if eq .Values.conf.nova.cinder.auth_type \"password\" -}}\n\n{{- if empty .Values.conf.nova.cinder.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.nova.cinder \"auth_url\" -}}\n{{- end -}}\n\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.nova.DEFAULT.osapi_compute_listen_port -}}\n{{- $_ := tuple \"compute\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.nova.DEFAULT \"osapi_compute_listen_port\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.nova_api_uwsgi.uwsgi.processes -}}\n{{- $_ := set .Values.conf.nova_api_uwsgi.uwsgi \"processes\" .Values.conf.nova.DEFAULT.osapi_compute_workers -}}\n{{- end -}}\n{{- if empty (index .Values.conf.nova_api_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"compute\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.nova_api_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n\n{{- if empty .Values.conf.nova_metadata_uwsgi.uwsgi.processes -}}\n{{- $_ := set .Values.conf.nova_metadata_uwsgi.uwsgi \"processes\" .Values.conf.nova.DEFAULT.metadata_workers -}}\n{{- end -}}\n{{- if empty (index .Values.conf.nova_metadata_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := .Values.network.metadata.port | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.nova_metadata_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .deployment_name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has 
\"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n\n{{ $__nova_compute := dict }}\n{{ $_ := set $__nova_compute \"config\" .Values.conf.nova  }}\n{{ range .Values.conf.nova_compute_redactions }}\n{{   $_ := set $__nova_compute \"config\" (omit $__nova_compute.config .) }}\n{{ end }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $configMapName }}\ntype: Opaque\ndata:\n  rally_tests.yaml: {{ toYaml .Values.conf.rally_tests.tests | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.paste | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n  nova_sudoers: {{ $envAll.Values.conf.nova_sudoers | b64enc }}\n  rootwrap.conf: {{ .Values.conf.rootwrap | b64enc }}\n{{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n{{- $filePrefix := replace \"_\" \"-\"  $key }}\n  {{ printf \"%s.filters\" $filePrefix }}: {{ $value.content | b64enc }}\n{{- end }}\n  nova.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.nova | b64enc }}\n  nova-compute.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" $__nova_compute.config | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  api_audit_map.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.api_audit_map | b64enc }}\n  nova-ironic.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.nova_ironic | b64enc }}\n  nova-api-uwsgi.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.nova_api_uwsgi | b64enc }}\n  nova-metadata-uwsgi.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.nova_metadata_uwsgi | b64enc }}\n{{- if .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" 
.Values.conf.mpm_event \"key\" \"mpm_event.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.wsgi_nova_api \"key\" \"wsgi-api.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.wsgi_nova_metadata \"key\" \"wsgi-metadata.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end }}\n{{- if .Values.conf.security }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.security \"key\" \"security.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- if .Values.manifests.configmap_etc }}\n{{- list \"nova-etc\" . | include \"nova.configmap.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/cron-job-archive-deleted-rows.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_job_archive_deleted_rows }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"nova-archive-deleted-rows-cron\" }}\n{{ tuple $envAll \"archive_deleted_rows\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $etcSources := .Values.pod.etcSources.nova_archive_deleted_rows }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: nova-archive-deleted-rows\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  schedule: {{ .Values.jobs.archive_deleted_rows.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.archive_deleted_rows.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.archive_deleted_rows.history.failed }}\n  {{- if .Values.jobs.archive_deleted_rows.starting_deadline }}\n  startingDeadlineSeconds: {{ .Values.jobs.archive_deleted_rows.starting_deadline }}\n  {{- end }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"archive-deleted-rows\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"nova\" \"archive-deleted-rows\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n{{ tuple \"nova_archive_deleted_rows\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 10 }}\n{{ tuple \"nova_archive_deleted_rows\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 10 }}\n          serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"archive_deleted_rows\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n          restartPolicy: OnFailure\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n          initContainers:\n{{ tuple $envAll \"archive-deleted-rows\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          containers:\n            - name: nova-archive-deleted-rows\n{{ tuple $envAll \"nova_archive_deleted_rows\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.archive_deleted_rows | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"archive_deleted_rows\" \"container\" \"nova_archive_deleted_rows\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              command:\n                - /tmp/archive-deleted-rows.sh\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n              env:\n                - name: REQUESTS_CA_BUNDLE\n                  value: \"/etc/nova/certs/ca.crt\"\n{{- end }}\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - name: archive-deleted-rows-conf\n                  mountPath: 
/etc/nova/nova.conf\n                  subPath: nova.conf\n                  readOnly: true\n                - name: nova-etc-snippets\n                  mountPath: /etc/nova/nova.conf.d/\n                  readOnly: true\n                - name: archive-deleted-rows-conf\n                  mountPath: /etc/nova/logging.conf\n                  subPath: logging.conf\n                  readOnly: true\n                - name: archive-deleted-rows\n                  mountPath: /tmp/archive-deleted-rows.sh\n                  readOnly: true\n                  subPath: archive-deleted-rows.sh\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal \"path\" \"/etc/nova/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: archive-deleted-rows\n              configMap:\n                name: nova-bin\n                defaultMode: 0555\n            - name: archive-deleted-rows-conf\n              secret:\n                secretName: nova-etc\n            - name: nova-etc-snippets\n{{- if $etcSources }}\n              projected:\n                sources:\n{{ toYaml $etcSources | indent 18 }}\n{{- else }}\n              emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/cron-job-cell-setup.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_job_cell_setup }}\n{{- $envAll := . }}\n\n{{- $etcSources := .Values.pod.etcSources.nova_cell_setup }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"nova-cell-setup-cron\" }}\n{{ tuple $envAll \"cell_setup\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: nova-cell-setup\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  schedule: {{ .Values.jobs.cell_setup.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.cell_setup.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.cell_setup.history.failed }}\n  {{- if .Values.jobs.cell_setup.starting_deadline }}\n  startingDeadlineSeconds: {{ .Values.jobs.cell_setup.starting_deadline }}\n  {{- end }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"cell-setup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"nova\" \"cell-setup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n{{ tuple \"nova_cell_setup\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 10 }}\n{{ tuple \"nova_cell_setup\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 10 }}\n          serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"cell_setup\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n          restartPolicy: OnFailure\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n          initContainers:\n{{ tuple $envAll \"cell_setup\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          containers:\n            - name: nova-cell-setup\n{{ tuple $envAll \"nova_cell_setup\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.cell_setup | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"cell_setup\" \"container\" \"nova_cell_setup\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              command:\n                - /tmp/cell-setup.sh\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n              env:\n                - name: REQUESTS_CA_BUNDLE\n                  value: \"/etc/nova/certs/ca.crt\"\n{{- end }}\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - name: nova-bin\n                  mountPath: /tmp/cell-setup.sh\n                  subPath: cell-setup.sh\n                  readOnly: true\n                - name: etcnova\n                  mountPath: /etc/nova\n                - name: nova-etc\n                  mountPath: 
/etc/nova/nova.conf\n                  subPath: nova.conf\n                  readOnly: true\n                - name: nova-etc-snippets\n                  mountPath: /etc/nova/nova.conf.d/\n                  readOnly: true\n                {{- if .Values.conf.nova.DEFAULT.log_config_append }}\n                - name: nova-etc\n                  mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append }}\n                  subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}\n                  readOnly: true\n                {{- end }}\n                - name: nova-etc\n                  mountPath: /etc/nova/policy.yaml\n                  subPath: policy.yaml\n                  readOnly: true\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal \"path\" \"/etc/nova/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: etcnova\n              emptyDir: {}\n            - name: nova-etc\n              secret:\n                secretName: nova-etc\n                defaultMode: 0444\n            - name: nova-etc-snippets\n{{- if $etcSources }}\n              projected:\n                sources:\n{{ toYaml $etcSources | indent 18 }}\n{{- else }}\n              emptyDir: {}\n{{ end }}\n            - name: nova-bin\n              configMap:\n                name: nova-bin\n                defaultMode: 0555\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- dict \"enabled\" .Values.manifests.certificates 
\"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n\n{{- end }}\n"
  },
  {
    "path": "nova/templates/cron-job-service-cleaner.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_job_service_cleaner }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"nova-service-cleaner\" }}\n{{ tuple $envAll \"service_cleaner\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $etcSources := .Values.pod.etcSources.nova_service_cleaner }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: nova-service-cleaner\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  schedule: {{ .Values.jobs.service_cleaner.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.service_cleaner.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.service_cleaner.history.failed }}\n  {{- if .Values.jobs.service_cleaner.starting_deadline }}\n  startingDeadlineSeconds: {{ .Values.jobs.service_cleaner.starting_deadline }}\n  {{- end }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"service-cleaner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"nova\" \"service-cleaner\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n 
       spec:\n{{ tuple \"nova_service_cleaner\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 10 }}\n{{ tuple \"nova_service_cleaner\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 10 }}\n          serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"service_cleaner\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n          restartPolicy: OnFailure\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n          initContainers:\n{{ tuple $envAll \"service_cleaner\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          containers:\n            - name: nova-service-cleaner\n{{ tuple $envAll \"nova_service_cleaner\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.service_cleaner | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"service_cleaner\" \"container\" \"nova_service_cleaner\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              env:\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.nova \"useCA\" (or .Values.manifests.certificates .Values.tls.identity) }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 14 }}\n{{- end }}\n              command:\n                - /tmp/nova-service-cleaner.sh\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - name: nova-bin\n                  mountPath: /tmp/nova-service-cleaner.sh\n                  subPath: 
nova-service-cleaner.sh\n                  readOnly: true\n                - name: etcnova\n                  mountPath: /etc/nova\n                - name: nova-etc-snippets\n                  mountPath: /etc/nova/nova.conf.d/\n                  readOnly: true\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 16 }}\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: etcnova\n              emptyDir: {}\n            - name: nova-etc\n              secret:\n                secretName: nova-etc\n                defaultMode: 0444\n            - name: nova-bin\n              configMap:\n                name: nova-bin\n                defaultMode: 0555\n            - name: nova-etc-snippets\n{{- if $etcSources }}\n              projected:\n                sources:\n{{ toYaml $etcSources | indent 18 }}\n{{- else }}\n              emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 12 }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/daemonset-compute.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"novaComputeLivenessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/nova/nova.conf\n    - --config-dir\n    - /etc/nova/nova.conf.d\n    - --service-queue-name\n    - compute\n    - --liveness-probe\n    {{- if .Values.pod.use_fqdn.compute }}\n    - --use-fqdn\n    {{- end }}\n{{- end }}\n\n{{- define \"novaComputeReadinessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/nova/nova.conf\n    - --config-dir\n    - /etc/nova/nova.conf.d\n    - --service-queue-name\n    - compute\n    {{- if .Values.pod.use_fqdn.compute }}\n    - --use-fqdn\n    {{- end }}\n{{- end }}\n\n{{- define \"novaComputeStartupProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/nova/nova.conf\n    - --config-dir\n    - /etc/nova/nova.conf.d\n    - --service-queue-name\n    - compute\n    - --liveness-probe\n    {{- if .Values.pod.use_fqdn.compute }}\n    - --use-fqdn\n    {{- end }}\n{{- end }}\n\n{{- define \"nova.compute.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 
3 }}\n{{- with $envAll }}\n\n{{- $mounts_nova_compute := .Values.pod.mounts.nova_compute.nova_compute }}\n{{- $mounts_nova_compute_init := .Values.pod.mounts.nova_compute.init_container }}\n{{- $etcSources := .Values.pod.etcSources.nova_compute }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: nova-compute\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll $daemonset | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll .Chart.Name $daemonset | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"nova_compute\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"nova-compute-default\" \"containerNames\" (list \"nova-compute\" \"init\" \"nova-compute-init\" \"nova-compute-vnc-init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"nova_compute\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"nova_compute\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      nodeSelector:\n        {{ .Values.labels.agent.compute.node_selector_key }}: {{ .Values.labels.agent.compute.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      hostNetwork: true\n      hostPID: true\n      hostIPC: true\n      dnsPolicy: ClusterFirstWithHostNet\n      initContainers:\n{{ tuple $envAll \"pod_dependency\" $mounts_nova_compute_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: nova-compute-init\n{{ tuple $envAll \"nova_compute\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_compute_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: NOVA_USER_UID\n              value: \"{{ .Values.pod.security_context.nova.pod.runAsUser }}\"\n          command:\n            - /tmp/nova-compute-init.sh\n          terminationMessagePath: /var/log/termination-log\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nova-bin\n              mountPath: /tmp/nova-compute-init.sh\n              subPath: nova-compute-init.sh\n              readOnly: true\n            - name: varlibnova\n              mountPath: /var/lib/nova\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n{{ if $mounts_nova_compute.volumeMounts }}{{ toYaml $mounts_nova_compute.volumeMounts | indent 12 }}{{ end }}\n        {{- if .Values.conf.ceph.enabled }}\n        - name: ceph-perms\n{{ tuple $envAll 
\"nova_compute\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"ceph_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - \"nova:\"\n            - /etc/ceph\n          terminationMessagePath: /var/log/termination-log\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n{{ if $mounts_nova_compute.volumeMounts }}{{ toYaml $mounts_nova_compute.volumeMounts | indent 12 }}{{ end }}\n        {{- if empty .Values.conf.ceph.cinder.keyring }}\n        - name: ceph-admin-keyring-placement\n{{ tuple $envAll \"nova_compute\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"ceph_admin_keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/ceph-admin-keyring.sh\n          terminationMessagePath: /var/log/termination-log\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: nova-bin\n              mountPath: /tmp/ceph-admin-keyring.sh\n              subPath: ceph-admin-keyring.sh\n              readOnly: true\n            {{- if empty .Values.conf.ceph.admin_keyring }}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{ end }}\n{{ if $mounts_nova_compute.volumeMounts }}{{ toYaml $mounts_nova_compute.volumeMounts | indent 12 }}{{ end }}\n        {{ end }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"nova_compute\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" 
$envAll \"application\" \"nova\" \"container\" \"ceph_keyring_placement\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: CEPH_CINDER_USER\n              value: \"{{ .Values.conf.ceph.cinder.user }}\"\n            {{- if .Values.conf.ceph.cinder.keyring }}\n            - name: CEPH_CINDER_KEYRING\n              value: \"{{ .Values.conf.ceph.cinder.keyring }}\"\n            {{ end }}\n            - name: LIBVIRT_CEPH_SECRET_UUID\n              value: \"{{ .Values.conf.ceph.secret_uuid }}\"\n          command:\n            - /tmp/ceph-keyring.sh\n          terminationMessagePath: /var/log/termination-log\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: nova-bin\n              mountPath: /tmp/ceph-keyring.sh\n              subPath: ceph-keyring.sh\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf.template\n              subPath: ceph.conf\n              readOnly: true\n{{ if $mounts_nova_compute.volumeMounts }}{{ toYaml $mounts_nova_compute.volumeMounts | indent 12 }}{{ end }}\n        {{ end }}\n        {{- if eq .Values.console.console_kind \"novnc\"}}\n        - name: nova-compute-vnc-init\n{{ tuple $envAll \"nova_compute\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.compute | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_compute_vnc_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/nova-console-compute-init.sh\n          terminationMessagePath: /var/log/termination-log\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nova-bin\n              
mountPath: /tmp/nova-console-compute-init.sh\n              subPath: nova-console-compute-init.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n{{ if $mounts_nova_compute.volumeMounts }}{{ toYaml $mounts_nova_compute.volumeMounts | indent 12 }}{{ end }}\n        {{ end }}\n        {{- if eq .Values.console.console_kind \"spice\"}}\n        - name: nova-compute-spice-init\n{{ tuple $envAll \"nova_compute\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.compute | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_compute_spice_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/nova-console-compute-init.sh\n          terminationMessagePath: /var/log/termination-log\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nova-bin\n              mountPath: /tmp/nova-console-compute-init.sh\n              subPath: nova-console-compute-init.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n{{ if $mounts_nova_compute.volumeMounts }}{{ toYaml $mounts_nova_compute.volumeMounts | indent 12 }}{{ end }}\n        {{ end }}\n        {{- if eq .Values.console.console_kind \"serial\"}}\n        - name: nova-compute-serial-init\n{{ tuple $envAll \"nova_compute\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.compute | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_compute_serial_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - 
/tmp/nova-console-compute-init.sh\n          terminationMessagePath: /var/log/termination-log\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nova-bin\n              mountPath: /tmp/nova-console-compute-init.sh\n              subPath: nova-console-compute-init.sh\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n{{ if $mounts_nova_compute.volumeMounts }}{{ toYaml $mounts_nova_compute.volumeMounts | indent 12 }}{{ end }}\n        {{ end }}\n        {{- if .Values.network.ssh.enabled }}\n        - name: nova-compute-ssh-init\n{{ tuple $envAll \"nova_compute_ssh\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.ssh | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_compute_ssh_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          terminationMessagePath: /var/log/termination-log\n          env:\n            - name: SSH_PORT\n              value: {{ .Values.network.ssh.port | quote }}\n            - name: NOVA_USER_UID\n              value: \"{{ .Values.pod.security_context.nova.pod.runAsUser }}\"\n          command:\n            - /tmp/ssh-init.sh\n          volumeMounts:\n            - name: varlibnova\n              mountPath: /var/lib/nova\n            - name: nova-ssh\n              mountPath: /tmp/nova-ssh/authorized_keys\n              subPath: public-key\n            - name: nova-ssh\n              mountPath: /tmp/nova-ssh/id_rsa\n              subPath: private-key\n            - name: nova-bin\n              mountPath: /tmp/ssh-init.sh\n              subPath: ssh-init.sh\n              readOnly: true\n{{ if $mounts_nova_compute.volumeMounts }}{{ toYaml $mounts_nova_compute.volumeMounts | indent 12 }}{{ end }}\n        {{- end }}\n      containers:\n  
      - name: nova-compute\n{{ tuple $envAll \"nova_compute\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.compute | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_compute\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n          {{- if .Values.conf.ceph.enabled }}\n            - name: CEPH_CINDER_USER\n              value: \"{{ .Values.conf.ceph.cinder.user }}\"\n            {{- if .Values.conf.ceph.cinder.keyring }}\n            - name: CEPH_CINDER_KEYRING\n              value: \"{{ .Values.conf.ceph.cinder.keyring }}\"\n            {{ end }}\n            - name: LIBVIRT_CEPH_SECRET_UUID\n              value: \"{{ .Values.conf.ceph.secret_uuid }}\"\n          {{ end }}\n            - name: RPC_PROBE_TIMEOUT\n              value: \"{{ .Values.pod.probes.rpc_timeout }}\"\n            - name: RPC_PROBE_RETRIES\n              value: \"{{ .Values.pod.probes.rpc_retries }}\"\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/nova/certs/ca.crt\"\n{{- end }}\n{{ dict \"envAll\" $envAll \"component\" \"compute\" \"container\" \"default\" \"type\" \"liveness\" \"probeTemplate\" (include \"novaComputeLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"compute\" \"container\" \"default\" \"type\" \"readiness\" \"probeTemplate\" (include \"novaComputeReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"compute\" \"container\" \"default\" \"type\" \"startup\" \"probeTemplate\" (include \"novaComputeStartupProbeTemplate\" $envAll | fromYaml) | include 
\"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/nova-compute.sh\n          terminationMessagePath: /var/log/termination-log\n          volumeMounts:\n            - name: dev-pts\n              mountPath: /dev/pts\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.nova.oslo_concurrency.lock_path }}\n            - name: nova-bin\n              mountPath: /tmp/nova-compute.sh\n              subPath: nova-compute.sh\n              readOnly: true\n            - name: nova-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova-compute.conf\n              readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d/\n              readOnly: true\n            {{- if .Values.conf.nova.DEFAULT.log_config_append }}\n            - name: nova-etc\n              mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: nova-etc\n              mountPath: /etc/nova/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: nova-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_nova_sudoers\n              subPath: nova_sudoers\n              readOnly: true\n            - name: nova-etc\n              
mountPath: /etc/nova/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"compute\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/nova/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: nova-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            {{- if .Values.conf.ceph.enabled }}\n            - name: etcceph\n              mountPath: /etc/ceph\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: Bidirectional\n              {{- end }}\n            {{- if and ( empty .Values.conf.ceph.cinder.keyring ) ( empty .Values.conf.ceph.admin_keyring )}}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{ end }}\n            {{ end }}\n            - mountPath: /lib/modules\n              name: libmodules\n              readOnly: true\n            - name: varlibnova\n              mountPath: /var/lib/nova\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: Bidirectional\n              {{- end }}\n            - name: varliblibvirt\n              mountPath: /var/lib/libvirt\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: Bidirectional\n              {{- end }}\n            - name: run\n              mountPath: /run\n            - name: cgroup\n              mountPath: /sys/fs/cgroup\n              readOnly: true\n            - 
name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: machine-id\n              mountPath: /etc/machine-id\n              readOnly: true\n            {{- if .Values.conf.enable_iscsi }}\n            - name: host-rootfs\n              mountPath: /mnt/host-rootfs\n              mountPropagation: HostToContainer\n            - name: usrlocalsbin\n              mountPath: /usr/local/sbin\n            - name: etciscsi\n              mountPath: /etc/iscsi\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: HostToContainer\n              {{- end }}\n            - name: dev\n              mountPath: /dev\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: HostToContainer\n              {{- end }}\n            - name: nova-bin\n              mountPath: /usr/local/sbin/iscsiadm\n              subPath: iscsiadm\n            - name: runlock\n              mountPath: /run/lock\n            - name: nova-bin\n              mountPath: /usr/local/sbin/multipath\n              subPath: multipath\n            - name: nova-bin\n              mountPath: /usr/local/sbin/multipathd\n              subPath: multipathd\n            - name: etcmultipath\n              mountPath: /etc/multipath\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: Bidirectional\n              {{- end }}\n            - name: sysblock\n              mountPath: /sys/block\n              {{- if or ( gt .Capabilities.KubeVersion.Major \"1\" ) ( ge .Capabilities.KubeVersion.Minor \"10\" ) }}\n              mountPropagation: HostToContainer\n              {{- end }}\n            {{- end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" 
.Values.secrets.tls.compute.osapi.internal \"path\" \"/etc/nova/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_nova_compute.volumeMounts }}{{ toYaml $mounts_nova_compute.volumeMounts | indent 12 }}{{ end }}\n        {{- if .Values.network.ssh.enabled }}\n        - name: nova-compute-ssh\n{{ tuple $envAll \"nova_compute_ssh\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.ssh | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_compute_ssh\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: KEY_TYPES\n              value: {{ include \"helm-toolkit.utils.joinListWithComma\" .Values.network.ssh.key_types | quote }}\n            - name: SSH_PORT\n              value: {{ .Values.network.ssh.port | quote }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/nova/certs/ca.crt\"\n{{- end }}\n          ports:\n            - containerPort: {{ .Values.network.ssh.port }}\n          command:\n            - /tmp/ssh-start.sh\n          terminationMessagePath: /var/log/termination-log\n          volumeMounts:\n            - name: varlibnova\n              mountPath: /var/lib/nova\n            - name: nova-bin\n              mountPath: /tmp/ssh-start.sh\n              subPath: ssh-start.sh\n              readOnly: true\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal \"path\" \"/etc/nova/certs\" | include 
\"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_nova_compute.volumeMounts }}{{ toYaml $mounts_nova_compute.volumeMounts | indent 12 }}{{ end }}\n        {{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: nova-bin\n          configMap:\n            name: nova-bin\n            defaultMode: 0555\n        - name: nova-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: nova-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        {{- if .Values.network.ssh.enabled }}\n        - name: nova-ssh\n          secret:\n            secretName: nova-ssh\n            defaultMode: 0644\n        {{ end }}\n        {{- if .Values.conf.ceph.enabled }}\n        - name: etcceph\n          hostPath:\n            path: /var/lib/openstack-helm/compute/nova\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n            defaultMode: 0444\n        {{- if and ( empty .Values.conf.ceph.cinder.keyring ) ( empty .Values.conf.ceph.admin_keyring ) }}\n        - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.ceph_client.user_secret_name }}\n        {{ end }}\n        {{ end }}\n        - name: dev-pts\n          hostPath:\n            path: /dev/pts\n        - name: libmodules\n          hostPath:\n            path: /lib/modules\n        - name: varlibnova\n          hostPath:\n            path: /var/lib/nova\n        - name: varliblibvirt\n          hostPath:\n            path: /var/lib/libvirt\n        - name: run\n          hostPath:\n            path: /run\n        - name: cgroup\n          hostPath:\n            path: /sys/fs/cgroup\n        - name: pod-shared\n          emptyDir: {}\n        - name: machine-id\n   
       hostPath:\n            path: /etc/machine-id\n        {{- if .Values.conf.enable_iscsi }}\n        - name: host-rootfs\n          hostPath:\n            path: /\n        - name: runlock\n          hostPath:\n            path: /run/lock\n        - name: etciscsi\n          hostPath:\n            path: /etc/iscsi\n        - name: dev\n          hostPath:\n            path: /dev\n        - name: usrlocalsbin\n          emptyDir: {}\n        - name: etcmultipath\n          hostPath:\n            path: /etc/multipath\n        - name: sysblock\n          hostPath:\n            path: /sys/block\n\n        {{- end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_nova_compute.volumes }}{{ toYaml $mounts_nova_compute.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_compute }}\n{{- $envAll := . }}\n{{- $daemonset := \"compute\" }}\n{{- $configMapName := \"nova-etc\" }}\n{{- $serviceAccountName := \"nova-compute\" }}\n\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"compute\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n\n{{ tuple $envAll \"pod_dependency\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"nova.compute.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"nova.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . 
| include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/deployment-api-metadata.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"novaApiMetadataLivenessProbeTemplate\" }}\nhttpGet:\n  scheme: {{ tuple \"compute_metadata\" \"service\" \"metadata\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: /\n  port: {{ .Values.network.metadata.port }}\n{{- end }}\n\n{{- define \"novaApiMetadataReadinessProbeTemplate\" }}\nhttpGet:\n  scheme: {{ tuple \"compute_metadata\" \"service\" \"metadata\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: /\n  port: {{ .Values.network.metadata.port }}\n{{- end }}\n\n{{- if .Values.manifests.deployment_api_metadata }}\n{{- $envAll := . 
}}\n\n{{- $mounts_nova_api_metadata := .Values.pod.mounts.nova_api_metadata.nova_api_metadata }}\n{{- $mounts_nova_api_metadata_init := .Values.pod.mounts.nova_api_metadata.init_container }}\n{{- $etcSources := .Values.pod.etcSources.nova_api_metadata }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"nova-api-metadata\" }}\n{{ tuple $envAll \"api_metadata\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nova-api-metadata\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"nova\" \"metadata\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api_metadata }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"nova\" \"metadata\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"metadata\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"nova_api_metadata\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"nova-api-metadata\" \"containerNames\" (list \"nova-api-metadata-init\" \"nova-api\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"nova_api_metadata\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"nova_api_metadata\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"nova\" \"metadata\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api_metadata.node_selector_key }}: {{ .Values.labels.api_metadata.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.metadata.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api_metadata\" $mounts_nova_api_metadata_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: nova-api-metadata-init\n{{ tuple $envAll \"nova_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api_metadata | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_api_metadata_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/nova-api-metadata-init.sh\n          
volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nova-bin\n              mountPath: /tmp/nova-api-metadata-init.sh\n              subPath: nova-api-metadata-init.sh\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova.conf\n              readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d/\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n      containers:\n        - name: nova-api\n{{ tuple $envAll \"nova_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api_metadata | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n          env:\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/nova/certs/ca.crt\"\n{{- end }}\n          command:\n            - /tmp/nova-api-metadata.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/nova-api-metadata.sh\n                  - stop\n          ports:\n            - containerPort: {{ .Values.network.metadata.port }}\n{{ dict \"envAll\" $envAll \"component\" \"api-metadata\" \"container\" \"default\" \"type\" \"liveness\" \"probeTemplate\" (include \"novaApiMetadataLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"api-metadata\" \"container\" \"default\" \"type\" \"readiness\" \"probeTemplate\" (include \"novaApiMetadataReadinessProbeTemplate\" $envAll | fromYaml) | include 
\"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.nova.oslo_concurrency.lock_path }}\n            - name: nova-bin\n              mountPath: /tmp/nova-api-metadata.sh\n              subPath: nova-api-metadata.sh\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova-metadata-uwsgi.ini\n              subPath: nova-metadata-uwsgi.ini\n              readOnly: true\n            - name: nova-bin\n              mountPath: /sbin/iptables\n              subPath: fake-iptables.sh\n              readOnly: true\n            - name: nova-bin\n              mountPath: /sbin/iptables-restore\n              subPath: fake-iptables.sh\n              readOnly: true\n            - name: nova-bin\n              mountPath: /sbin/iptables-save\n              subPath: fake-iptables.sh\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova.conf\n              readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d/\n              readOnly: true\n            {{- if .Values.conf.nova.DEFAULT.log_config_append }}\n            - name: nova-etc\n              mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: nova-etc\n              mountPath: /etc/nova/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/api_audit_map.conf\n              subPath: 
api_audit_map.conf\n              readOnly: true\n            - name: nova-etc\n              # NOTE (Portdirect): We mount here to override Kollas\n              # custom sudoers file when using Kolla images, this\n              # location will also work fine for other images.\n              mountPath: /etc/sudoers.d/kolla_nova_sudoers\n              subPath: nova_sudoers\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/rootwrap.conf\n              subPath: rootwrap.conf\n              readOnly: true\n            {{- range $key, $value := $envAll.Values.conf.rootwrap_filters }}\n            {{- if ( has \"metadata\" $value.pods ) }}\n            {{- $filePrefix := replace \"_\" \"-\"  $key }}\n            {{- $rootwrapFile := printf \"/etc/nova/rootwrap.d/%s.filters\" $filePrefix }}\n            - name: nova-etc\n              mountPath: {{ $rootwrapFile }}\n              subPath: {{ base $rootwrapFile }}\n              readOnly: true\n            {{- end }}\n            {{- end }}\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n              readOnly: true\n            {{- if .Values.manifests.certificates }}\n            - name: nova-etc\n              mountPath: {{ .Values.conf.software.apache2.conf_dir }}/wsgi-metadata.conf\n              subPath: wsgi-metadata.conf\n              readOnly: true\n            - name: nova-etc\n              mountPath: {{ .Values.conf.software.apache2.mods_dir }}/mpm_event.conf\n              subPath: mpm_event.conf\n              readOnly: true\n            {{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute_metadata.metadata.internal \"path\" \"/etc/nova/certs\" | 
include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_nova_api_metadata.volumeMounts }}{{ toYaml $mounts_nova_api_metadata.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: nova-bin\n          configMap:\n            name: nova-bin\n            defaultMode: 0555\n        - name: nova-etc\n          secret:\n            secretName: nova-etc\n            defaultMode: 0444\n        - name: nova-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: pod-shared\n          emptyDir: {}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute_metadata.metadata.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_nova_api_metadata.volumes }}{{ toYaml $mounts_nova_api_metadata.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/deployment-api-osapi.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"novaApiOsapiLivenessProbeTemplate\" }}\nhttpGet:\n  scheme: {{ tuple \"compute\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: /\n  port: {{ tuple \"compute\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- define \"novaApiOsapiReadinessProbeTemplate\" }}\nhttpGet:\n  scheme: {{ tuple \"compute\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: /\n  port: {{ tuple \"compute\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- if .Values.manifests.deployment_api_osapi }}\n{{- $envAll := . 
}}\n\n{{- $mounts_nova_api_osapi := .Values.pod.mounts.nova_api_osapi.nova_api_osapi }}\n{{- $mounts_nova_api_osapi_init := .Values.pod.mounts.nova_api_osapi.init_container }}\n{{- $etcSources := .Values.pod.etcSources.nova_api_osapi }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"nova-api-osapi\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nova-api-osapi\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"nova\" \"os-api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.osapi }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"nova\" \"os-api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"os-api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"nova_api_osapi\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"nova-api-osapi\" \"containerNames\" (list \"nova-osapi\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"nova_api_osapi\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"nova_api_osapi\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"nova\" \"os-api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.osapi.node_selector_key }}: {{ .Values.labels.osapi.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.osapi.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_nova_api_osapi_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: nova-osapi\n{{ tuple $envAll \"nova_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_osapi\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n          env:\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/nova/certs/ca.crt\"\n{{- end }}\n          command:\n            - /tmp/nova-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/nova-api.sh\n                  - stop\n          ports:\n            - name: n-api\n              
containerPort: {{ tuple \"compute\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" $envAll \"component\" \"api-osapi\" \"container\" \"default\" \"type\" \"liveness\" \"probeTemplate\" (include \"novaApiOsapiLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"api-osapi\" \"container\" \"default\" \"type\" \"readiness\" \"probeTemplate\" (include \"novaApiOsapiReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.nova.oslo_concurrency.lock_path }}\n            - name: pod-var-nova\n              mountPath: /var/lib/nova\n            - name: nova-bin\n              mountPath: /tmp/nova-api.sh\n              subPath: nova-api.sh\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova.conf\n              readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d/\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova-api-uwsgi.ini\n              subPath: nova-api-uwsgi.ini\n              readOnly: true\n            {{- if .Values.conf.nova.DEFAULT.log_config_append }}\n            - name: nova-etc\n              mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: nova-etc\n              mountPath: /etc/nova/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/policy.yaml\n     
         subPath: policy.yaml\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/api_audit_map.conf\n              subPath: api_audit_map.conf\n              readOnly: true\n            {{- if .Values.manifests.certificates }}\n            - name: nova-etc\n              mountPath: {{ .Values.conf.software.apache2.conf_dir }}/wsgi-api.conf\n              subPath: wsgi-api.conf\n              readOnly: true\n            - name: nova-etc\n              mountPath: {{ .Values.conf.software.apache2.mods_dir }}/mpm_event.conf\n              subPath: mpm_event.conf\n              readOnly: true\n            {{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal \"path\" \"/etc/nova/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_nova_api_osapi.volumeMounts }}{{ toYaml $mounts_nova_api_osapi.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-var-nova\n          emptyDir: {}\n        - name: nova-bin\n          configMap:\n            name: nova-bin\n            defaultMode: 0555\n        - name: nova-etc\n          secret:\n            secretName: nova-etc\n            defaultMode: 0444\n        - name: nova-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 
}}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_nova_api_osapi.volumes}}{{ toYaml $mounts_nova_api_osapi.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/deployment-conductor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"novaConductorLivenessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/nova/nova.conf\n    - --config-dir\n    - /etc/nova/nova.conf.d\n    - --service-queue-name\n    - conductor\n    - --liveness-probe\n{{- end }}\n\n{{- define \"novaConductorReadinessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/nova/nova.conf\n    - --config-dir\n    - /etc/nova/nova.conf.d\n    - --service-queue-name\n    - conductor\n{{- end }}\n\n{{- if and .Values.manifests.deployment_conductor (not .Values.manifests.statefulset_conductor) }}\n{{- $envAll := . 
}}\n\n{{- $mounts_nova_conductor := .Values.pod.mounts.nova_conductor.nova_conductor }}\n{{- $mounts_nova_conductor_init := .Values.pod.mounts.nova_conductor.init_container }}\n{{- $etcSources := .Values.pod.etcSources.nova_conductor }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"nova-conductor\" }}\n{{ tuple $envAll \"conductor\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nova-conductor\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"nova\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.conductor }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"nova\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"nova_conductor\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"nova-conductor\" \"containerNames\" (list \"nova-conductor\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"nova_conductor\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"nova_conductor\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"nova\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.conductor.node_selector_key }}: {{ .Values.labels.conductor.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"conductor\" $mounts_nova_conductor_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: nova-conductor\n{{ tuple $envAll \"nova_conductor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.conductor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_conductor\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"conductor\" \"container\" \"default\" \"type\" \"liveness\" \"probeTemplate\" (include \"novaConductorLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"conductor\" \"container\" \"default\" \"type\" \"readiness\" \"probeTemplate\" (include \"novaConductorReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          env:\n            - name: 
RPC_PROBE_TIMEOUT\n              value: \"{{ .Values.pod.probes.rpc_timeout }}\"\n            - name: RPC_PROBE_RETRIES\n              value: \"{{ .Values.pod.probes.rpc_retries }}\"\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/nova/certs/ca.crt\"\n{{- end }}\n          command:\n            - /tmp/nova-conductor.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.nova.oslo_concurrency.lock_path }}\n            - name: nova-bin\n              mountPath: /tmp/nova-conductor.sh\n              subPath: nova-conductor.sh\n              readOnly: true\n            - name: nova-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova.conf\n              readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d/\n              readOnly: true\n            {{- if .Values.conf.nova.DEFAULT.log_config_append }}\n            - name: nova-etc\n              mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: nova-etc\n              mountPath: /etc/nova/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal \"path\" \"/etc/nova/certs\" \"certs\" (tuple \"ca.crt\") | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" 
$envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_nova_conductor.volumeMounts }}{{ toYaml $mounts_nova_conductor.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: nova-bin\n          configMap:\n            name: nova-bin\n            defaultMode: 0555\n        - name: nova-etc\n          secret:\n            secretName: nova-etc\n            defaultMode: 0444\n        - name: nova-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_nova_conductor.volumes }}{{ toYaml $mounts_nova_conductor.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/deployment-novncproxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"novaNovncproxyLivenessProbeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"compute_novnc_proxy\" \"internal\" \"novnc_proxy\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- define \"novaNovncproxyReadinessProbeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"compute_novnc_proxy\" \"internal\" \"novnc_proxy\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- if and .Values.manifests.deployment_novncproxy ( eq .Values.console.console_kind \"novnc\" )}}\n{{- $envAll := . 
}}\n\n{{- $mounts_nova_novncproxy := .Values.pod.mounts.nova_novncproxy.nova_novncproxy }}\n{{- $mounts_nova_novncproxy_init := .Values.pod.mounts.nova_novncproxy.init_novncproxy }}\n{{- $etcSources := .Values.pod.etcSources.nova_novncproxy }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n\n{{- $vencrypt_enabled := (contains \"vencrypt\" .Values.conf.nova.vnc.auth_schemes) }}\n\n{{- $serviceAccountName := \"nova-novncproxy\" }}\n{{ tuple $envAll \"novncproxy\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nova-novncproxy\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"nova\" \"novnc-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.novncproxy }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"nova\" \"novnc-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"novnc-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"nova_novncproxy\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"nova-novncproxy\" \"containerNames\" (list \"nova-novncproxy\" \"nova-novncproxy-init-assets\" \"nova-novncproxy-init\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"nova_novncproxy\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"nova_novncproxy\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"nova\" \"novnc-proxy\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.novncproxy.node_selector_key }}: {{ .Values.labels.novncproxy.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n{{- if .Values.pod.useHostNetwork.novncproxy }}\n      hostNetwork: true\n      dnsPolicy: ClusterFirstWithHostNet\n{{- end }}\n      initContainers:\n{{ tuple $envAll \"novncproxy\" $mounts_nova_novncproxy_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: nova-novncproxy-init\n{{ tuple $envAll \"nova_novncproxy\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.novncproxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_novncproxy_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - 
/tmp/nova-console-proxy-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nova-bin\n              mountPath: /tmp/nova-console-proxy-init.sh\n              subPath: nova-console-proxy-init.sh\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova.conf\n              readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d\n              readOnly: true\n            {{- if .Values.conf.nova.DEFAULT.log_config_append }}\n            - name: nova-etc\n              mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n        - name: nova-novncproxy-init-assets\n{{ tuple $envAll \"nova_novncproxy_assets\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.novncproxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_novncproxy_init_assests\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/nova-console-proxy-init-assets.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nova-bin\n              mountPath: /tmp/nova-console-proxy-init-assets.sh\n              subPath: nova-console-proxy-init-assets.sh\n              readOnly: true\n            - name: pod-usr-share-novnc\n              mountPath: /tmp/usr/share/novnc\n      containers:\n        - name: nova-novncproxy\n{{ tuple $envAll \"nova_novncproxy\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll 
$envAll.Values.pod.resources.novncproxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_novncproxy\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"novncproxy\" \"container\" \"default\" \"type\" \"liveness\" \"probeTemplate\" (include \"novaNovncproxyLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"novncproxy\" \"container\" \"default\" \"type\" \"readiness\" \"probeTemplate\" (include \"novaNovncproxyReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/nova-console-proxy.sh\n          ports:\n            - name: n-novnc\n              containerPort: {{ tuple \"compute_novnc_proxy\" \"internal\" \"novnc_proxy\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.nova.oslo_concurrency.lock_path }}\n            - name: nova-bin\n              mountPath: /tmp/nova-console-proxy.sh\n              subPath: nova-console-proxy.sh\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova.conf\n              readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/logging.conf\n              subPath: logging.conf\n              readOnly: true\n            - name: pod-usr-share-novnc\n              mountPath: /usr/share/novnc\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            {{- if $vencrypt_enabled }}\n            - name: {{ .Values.secrets.tls.compute_novnc_proxy.vencrypt.internal }}\n              mountPath: /etc/pki/nova-novncproxy\n              readOnly: true\n            {{- end }}\n\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.compute_novnc_proxy.novncproxy.internal \"path\" \"/etc/nova/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_nova_novncproxy.volumeMounts }}{{ toYaml 
$mounts_nova_novncproxy.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: nova-bin\n          configMap:\n            name: nova-bin\n            defaultMode: 0555\n        - name: nova-etc\n          secret:\n            secretName: nova-etc\n            defaultMode: 0444\n        - name: nova-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: pod-usr-share-novnc\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n        {{- if $vencrypt_enabled }}\n        - name: {{ .Values.secrets.tls.compute_novnc_proxy.vencrypt.internal }}\n          secret:\n            secretName: {{ .Values.secrets.tls.compute_novnc_proxy.vencrypt.internal }}\n            defaultMode: 0444\n        {{- end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.compute_novnc_proxy.novncproxy.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_nova_novncproxy.volumes }}{{ toYaml $mounts_nova_novncproxy.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/deployment-scheduler.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"novaSchedulerLivenessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/nova/nova.conf\n    - --config-dir\n    - /etc/nova/nova.conf.d\n    - --service-queue-name\n    - scheduler\n    - --liveness-probe\n{{- end }}\n\n{{- define \"novaSchedulerReadinessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/nova/nova.conf\n    - --config-dir\n    - /etc/nova/nova.conf.d\n    - --service-queue-name\n    - scheduler\n{{- end }}\n\n{{- if and .Values.manifests.deployment_scheduler (not .Values.manifests.statefulset_scheduler) }}\n{{- $envAll := . 
}}\n\n{{- $mounts_nova_scheduler := .Values.pod.mounts.nova_scheduler.nova_scheduler }}\n{{- $mounts_nova_scheduler_init := .Values.pod.mounts.nova_scheduler.init_container }}\n{{- $etcSources := .Values.pod.etcSources.nova_scheduler }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"nova-scheduler\" }}\n{{ tuple $envAll \"scheduler\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nova-scheduler\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"nova\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.scheduler }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"nova\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"nova_scheduler\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"nova-scheduler\" \"containerNames\" (list \"nova-scheduler\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"nova_scheduler\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"nova_scheduler\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"nova\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.scheduler.node_selector_key }}: {{ .Values.labels.scheduler.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"scheduler\" $mounts_nova_scheduler_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: nova-scheduler\n{{ tuple $envAll \"nova_scheduler\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.scheduler | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_scheduler\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"scheduler\" \"container\" \"default\" \"type\" \"liveness\" \"probeTemplate\" (include \"novaSchedulerLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"scheduler\" \"container\" \"default\" \"type\" \"readiness\" \"probeTemplate\" (include \"novaSchedulerReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          env:\n            - name: 
RPC_PROBE_TIMEOUT\n              value: \"{{ .Values.pod.probes.rpc_timeout }}\"\n            - name: RPC_PROBE_RETRIES\n              value: \"{{ .Values.pod.probes.rpc_retries }}\"\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/nova/certs/ca.crt\"\n{{- end }}\n          command:\n            - /tmp/nova-scheduler.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.nova.oslo_concurrency.lock_path }}\n            - name: nova-bin\n              mountPath: /tmp/nova-scheduler.sh\n              subPath: nova-scheduler.sh\n              readOnly: true\n            - name: nova-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova.conf\n              readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d/\n              readOnly: true\n            {{- if .Values.conf.nova.DEFAULT.log_config_append }}\n            - name: nova-etc\n              mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: nova-etc\n              mountPath: /etc/nova/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal \"path\" \"/etc/nova/certs\" | include 
\"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_nova_scheduler.volumeMounts }}{{ toYaml $mounts_nova_scheduler.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: nova-bin\n          configMap:\n            name: nova-bin\n            defaultMode: 0555\n        - name: nova-etc\n          secret:\n            secretName: nova-etc\n            defaultMode: 0444\n        - name: nova-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_nova_scheduler.volumes }}{{ toYaml $mounts_nova_scheduler.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/deployment-serialproxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"novaSerialproxyLivenessProbeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"compute_serial_proxy\" \"internal\" \"serial_proxy\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- define \"novaSerialproxyReadinessProbeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"compute_serial_proxy\" \"internal\" \"serial_proxy\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- if and .Values.manifests.deployment_serialproxy ( eq .Values.console.console_kind \"serial\" )}}\n{{- $envAll := . 
}}\n\n{{- $mounts_nova_serialproxy := .Values.pod.mounts.nova_serialproxy.nova_serialproxy }}\n{{- $mounts_nova_serialproxy_init := .Values.pod.mounts.nova_serialproxy.init_serialproxy }}\n{{- $etcSources := .Values.pod.etcSources.nova_serialproxy }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"nova-serialproxy\" }}\n{{ tuple $envAll \"serialproxy\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nova-serialproxy\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"nova\" \"serial-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.serialproxy }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"nova\" \"serial-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"serial-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"nova_serialproxy\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"nova-serialproxy\" \"containerNames\" (list \"nova-serialproxy\" \"nova-serialproxy-init\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"nova_serialproxy\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"nova_serialproxy\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"nova\" \"serial-proxy\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.serialproxy.node_selector_key }}: {{ .Values.labels.serialproxy.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n{{- if .Values.pod.useHostNetwork.serialproxy }}\n      hostNetwork: true\n      dnsPolicy: ClusterFirstWithHostNet\n{{- end }}\n      initContainers:\n{{ tuple $envAll \"serialproxy\" $mounts_nova_serialproxy_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: nova-serialproxy-init\n{{ tuple $envAll \"nova_serialproxy\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.serialproxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_serialproxy_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - 
/tmp/nova-console-proxy-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nova-bin\n              mountPath: /tmp/nova-console-proxy-init.sh\n              subPath: nova-console-proxy-init.sh\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova.conf\n              readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d/\n              readOnly: true\n            {{- if .Values.conf.nova.DEFAULT.log_config_append }}\n            - name: nova-etc\n              mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n      containers:\n        - name: nova-serialproxy\n{{ tuple $envAll \"nova_serialproxy\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.serialproxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_serialproxy\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"serialproxy\" \"container\" \"default\" \"type\" \"liveness\" \"probeTemplate\" (include \"novaSerialproxyLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"serialproxy\" \"container\" \"default\" \"type\" \"readiness\" \"probeTemplate\" (include \"novaSerialproxyReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/nova-console-proxy.sh\n          ports:\n            
- name: n-serial\n              containerPort: {{ tuple \"compute_serial_proxy\" \"internal\" \"serial_proxy\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.nova.oslo_concurrency.lock_path }}\n            - name: nova-bin\n              mountPath: /tmp/nova-console-proxy.sh\n              subPath: nova-console-proxy.sh\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova.conf\n              readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d/\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/logging.conf\n              subPath: logging.conf\n              readOnly: true\n            - name: pod-usr-share-serial\n              mountPath: /usr/share/serial\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.compute_serial_proxy.serialproxy.internal \"path\" \"/etc/nova/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_nova_serialproxy.volumeMounts }}{{ toYaml $mounts_nova_serialproxy.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        
- name: oslo-lock-path\n          emptyDir: {}\n        - name: nova-bin\n          configMap:\n            name: nova-bin\n            defaultMode: 0555\n        - name: nova-etc\n          secret:\n            secretName: nova-etc\n            defaultMode: 0444\n        - name: nova-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: pod-usr-share-serial\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.compute_serial_proxy.serialproxy.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_nova_serialproxy.volumes }}{{ toYaml $mounts_nova_serialproxy.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/deployment-spiceproxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"novaSpiceproxyLivenessProbeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"compute_spice_proxy\" \"internal\" \"spice_proxy\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- define \"novaSpiceproxyReadinessProbeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"compute_spice_proxy\" \"internal\" \"spice_proxy\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- if and .Values.manifests.deployment_spiceproxy ( eq .Values.console.console_kind \"spice\" )}}\n{{- $envAll := . 
}}\n\n{{- $mounts_nova_spiceproxy := .Values.pod.mounts.nova_spiceproxy.nova_spiceproxy }}\n{{- $mounts_nova_spiceproxy_init := .Values.pod.mounts.nova_spiceproxy.init_spiceproxy }}\n{{- $etcSources := .Values.pod.etcSources.nova_spiceproxy }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"nova-spiceproxy\" }}\n{{ tuple $envAll \"spiceproxy\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nova-spiceproxy\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"nova\" \"spice-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.spiceproxy }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"nova\" \"spice-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"spice-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"nova_spiceproxy\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ tuple \"nova_spiceproxy\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"nova_spiceproxy\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"nova\" \"spice-proxy\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.spiceproxy.node_selector_key }}: {{ .Values.labels.spiceproxy.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      hostNetwork: true\n      dnsPolicy: ClusterFirstWithHostNet\n      initContainers:\n{{ tuple $envAll \"spiceproxy\" $mounts_nova_spiceproxy_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: nova-spiceproxy-init\n{{ tuple $envAll \"nova_spiceproxy\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.spiceproxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_spiceproxy_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/nova-console-proxy-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nova-bin\n              mountPath: /tmp/nova-console-proxy-init.sh\n              subPath: nova-console-proxy-init.sh\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova.conf\n              readOnly: true\n            {{- if .Values.conf.nova.DEFAULT.log_config_append }}\n            - name: nova-etc\n              mountPath: {{ 
.Values.conf.nova.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n        - name: nova-spiceproxy-init-assets\n{{ tuple $envAll \"nova_spiceproxy_assets\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.spiceproxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_spiceproxy_init_assets\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/nova-console-proxy-init-assets.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nova-bin\n              mountPath: /tmp/nova-console-proxy-init-assets.sh\n              subPath: nova-console-proxy-init-assets.sh\n              readOnly: true\n            - name: pod-usr-share-spice-html5\n              mountPath: /tmp/usr/share/spice-html5\n      containers:\n        - name: nova-spiceproxy\n{{ tuple $envAll \"nova_spiceproxy\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.spiceproxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_spiceproxy\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"compute-spice-proxy\" \"container\" \"default\" \"type\" \"liveness\" \"probeTemplate\" (include \"novaSpiceproxyLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"compute-spice-proxy\" \"container\" \"default\" \"type\" \"readiness\" 
\"probeTemplate\" (include \"novaSpiceproxyReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/nova-console-proxy.sh\n          ports:\n            - name: n-spice\n              containerPort: {{ tuple \"compute_spice_proxy\" \"internal\" \"spice_proxy\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.nova.oslo_concurrency.lock_path }}\n            - name: nova-bin\n              mountPath: /tmp/nova-console-proxy.sh\n              subPath: nova-console-proxy.sh\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova.conf\n              readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d/\n              readOnly: true\n            {{- if .Values.conf.nova.DEFAULT.log_config_append }}\n            - name: nova-etc\n              mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: pod-usr-share-spice-html5\n              mountPath: /usr/share/spice-html5\n              readOnly: true\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.compute_spice_proxy.spiceproxy.internal \"path\" \"/etc/nova/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_nova_spiceproxy.volumeMounts }}{{ toYaml $mounts_nova_spiceproxy.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n 
       - name: nova-bin\n          configMap:\n            name: nova-bin\n            defaultMode: 0555\n        - name: nova-etc\n          secret:\n            secretName: nova-etc\n            defaultMode: 0444\n        - name: nova-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: pod-usr-share-spice-html5\n          emptyDir: {}\n        - name: pod-shared\n          emptyDir: {}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.compute_spice_proxy.spiceproxy.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_nova_spiceproxy.volumes }}{{ toYaml $mounts_nova_spiceproxy.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "nova/templates/ingress-metadata.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_metadata .Values.network.metadata.ingress.public }}\n{{- $envAll := . -}}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"metadata\" \"backendServiceType\" \"compute_metadata\" \"backendPort\" \"n-meta\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.compute_metadata.metadata.internal -}}\n{{- if and .Values.manifests.certificates $secretName }}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.compute_metadata.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/ingress-novncproxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_novncproxy .Values.network.novncproxy.ingress.public (eq .Values.console.console_kind \"novnc\") }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"novncproxy\" \"backendServiceType\" \"compute_novnc_proxy\" \"backendPort\" \"n-novnc\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.compute_novnc_proxy.novncproxy.internal -}}\n{{- if and .Values.manifests.certificates $secretName }}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.compute_novnc_proxy.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end }}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/ingress-osapi.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_osapi .Values.network.osapi.ingress.public }}\n{{- $envAll := . -}}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"osapi\" \"backendServiceType\" \"compute\" \"backendPort\" \"n-api\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.compute.osapi.internal -}}\n{{- if and .Values.manifests.certificates $secretName }}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.compute.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end }}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/ingress-serialproxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_serialproxy .Values.network.serialproxy.ingress.public (eq .Values.console.console_kind \"serial\") }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"serialproxy\" \"backendServiceType\" \"compute_serial_proxy\" \"backendPort\" \"n-serial\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.compute_serial_proxy.serialproxy.internal -}}\n{{- if and .Values.manifests.certificates $secretName }}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.compute_serial_proxy.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end }}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/ingress-spiceproxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_spiceproxy .Values.network.spiceproxy.ingress.public (eq .Values.console.console_kind \"spice\") }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"spiceproxy\" \"backendServiceType\" \"compute_spice_proxy\" \"backendPort\" \"n-spice\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.compute_spice_proxy.spiceproxy.internal -}}\n{{- if and .Values.manifests.certificates $secretName }}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.compute_spice_proxy.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end }}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- $envAll := . }}\n{{- if and $envAll.Values.manifests.job_bootstrap $envAll.Values.bootstrap.enabled }}\n{{- $serviceName := \"nova\" -}}\n{{- $keystoneUser := $envAll.Values.bootstrap.ks_user -}}\n{{- $backoffLimit := index . \"backoffLimit\" | default \"1000\" -}}\n{{- $configMapBin := printf \"%s-%s\" $serviceName \"bin\" -}}\n{{- $configMapEtc := printf \"%s-%s\" $serviceName \"etc\" -}}\n{{- $configFile := printf \"/etc/%s/%s.conf\" $serviceName $serviceName -}}\n{{- $nodeSelector := index . 
\"nodeSelector\" | default ( dict $envAll.Values.labels.job.node_selector_key $envAll.Values.labels.job.node_selector_value ) -}}\n{{- $serviceAccountName := printf \"%s-%s\" $serviceName \"bootstrap\" -}}\n{{ tuple $envAll \"bootstrap\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ $serviceAccountName | quote }}\n  labels:\n{{ tuple $envAll \"nova\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{ tuple \"nova_bootstrap\" $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 }}\nspec:\n  backoffLimit: {{ $backoffLimit }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"bootstrap\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n{{- if $envAll.Values.bootstrap.wait_for_computes.enabled }}\n        - name: nova-wait-for-computes-init\n{{ tuple $envAll \"nova_wait_for_computes_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" \"container\" \"nova_wait_for_computes_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 
}}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/wait-for-computes-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: bootstrap-sh\n              mountPath: /tmp/wait-for-computes-init.sh\n              subPath: wait-for-computes-init.sh\n              readOnly: true\n{{- end }}\n      containers:\n        - name: bootstrap\n          image: {{ $envAll.Values.images.tags.bootstrap }}\n          imagePullPolicy: {{ $envAll.Values.images.pull_policy }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" \"container\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" ( index $envAll.Values.secrets.identity $keystoneUser ) \"useCA\" (or .Values.manifests.certificates .Values.tls.identity) }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n            - name: WAIT_PERCENTAGE\n              value: \"{{ .Values.bootstrap.wait_for_computes.wait_percentage }}\"\n            - name: REMAINING_WAIT\n              value: \"{{ .Values.bootstrap.wait_for_computes.remaining_wait }}\"\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/bootstrap.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: bootstrap-sh\n              mountPath: /tmp/bootstrap.sh\n              subPath: bootstrap.sh\n              readOnly: true\n            - name: etc-service\n              mountPath: {{ dir $configFile | quote }}\n            - name: bootstrap-conf\n              mountPath: {{ $configFile | quote }}\n              subPath: {{ base $configFile | quote }}\n              readOnly: true\n            {{- if 
.Values.conf.nova.DEFAULT.log_config_append }}\n            - name: bootstrap-conf\n              mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append | quote }}\n              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append | quote }}\n              readOnly: true\n            {{- end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: bootstrap-sh\n          configMap:\n            name: {{ $configMapBin | quote }}\n            defaultMode: 0555\n        - name: etc-service\n          emptyDir: {}\n        - name: bootstrap-conf\n          secret:\n            secretName: {{ $configMapEtc | quote }}\n            defaultMode: 0444\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - ''\n    resources:\n      - nodes\n    verbs:\n      - get\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n{{- end }}\n"
  },
  {
    "path": "nova/templates/job-cell-setup.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_cell_setup }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"nova-cell-setup\" }}\n{{ tuple $envAll \"cell_setup\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $etcSources := .Values.pod.etcSources.nova_cell_setup }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: nova-cell-setup\n  labels:\n{{ tuple $envAll \"nova\" \"cell-setup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ tuple \"nova_cell_setup\" $envAll | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"cell-setup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"nova-cell-setup\" \"containerNames\" (list \"nova-cell-setup-init\" \"nova-cell-setup\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ 
.Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"cell_setup\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n{{- if $envAll.Values.bootstrap.wait_for_computes.enabled }}\n        - name: nova-wait-for-computes-init\n{{ tuple $envAll \"nova_wait_for_computes_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"bootstrap\" \"container\" \"nova_wait_for_computes_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/wait-for-computes-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nova-bin\n              mountPath: /tmp/wait-for-computes-init.sh\n              subPath: wait-for-computes-init.sh\n              readOnly: true\n{{- end }}\n        - name: nova-cell-setup-init\n{{ tuple $envAll \"nova_cell_setup_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.cell_setup | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova_cell_setup\" \"container\" \"nova_cell_setup_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.admin \"useCA\" (or .Values.manifests.certificates .Values.tls.identity) }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          command:\n            - /tmp/cell-setup-init.sh\n          volumeMounts:\n           
 - name: pod-tmp\n              mountPath: /tmp\n            - name: nova-bin\n              mountPath: /tmp/cell-setup-init.sh\n              subPath: cell-setup-init.sh\n              readOnly: true\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      containers:\n        - name: nova-cell-setup\n{{ tuple $envAll \"nova_cell_setup\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.cell_setup | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova_cell_setup\" \"container\" \"nova_cell_setup\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n          env:\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/nova/certs/ca.crt\"\n{{- end }}\n          command:\n            - /tmp/cell-setup.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nova-bin\n              mountPath: /tmp/cell-setup.sh\n              subPath: cell-setup.sh\n              readOnly: true\n            - name: etcnova\n              mountPath: /etc/nova\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova.conf\n              readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d/\n              readOnly: true\n            {{- if .Values.conf.nova.DEFAULT.log_config_append }}\n            - name: nova-etc\n              mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: nova-etc\n      
        mountPath: /etc/nova/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: etcnova\n          emptyDir: {}\n        - name: nova-etc\n          secret:\n            secretName: nova-etc\n            defaultMode: 0444\n        - name: nova-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: nova-bin\n          configMap:\n            name: nova-bin\n            defaultMode: 0555\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - ''\n    resources:\n      - nodes\n    verbs:\n      - get\n      - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n{{- end }}\n"
  },
  {
    "path": "nova/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $serviceName := \"nova\" -}}\n{{- $dbSvc := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"database\" \"configDbKey\" \"connection\" -}}\n{{- $dbApi := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"api_database\" \"configDbKey\" \"connection\" -}}\n{{- $dbCell := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"cell0_database\" \"configDbKey\" \"connection\" -}}\n{{- $dbsToDrop := list $dbSvc $dbApi $dbCell }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" $serviceName \"dbsToDrop\" $dbsToDrop -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbDropJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.nova.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $serviceName := \"nova\" -}}\n{{- $dbSvc := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"database\" \"configDbKey\" \"connection\" -}}\n{{- $dbApi := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"api_database\" \"configDbKey\" \"connection\" -}}\n{{- $dbCell := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"cell0_database\" \"configDbKey\" \"connection\" -}}\n{{- $dbsToInit := list $dbSvc $dbApi $dbCell }}\n{{- $dbInitJob := dict \"envAll\" . 
\"serviceName\" $serviceName \"dbsToInit\" $dbsToInit -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.nova.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- define \"nova.templates._job_db_sync.env_vars\" -}}\n{{- $envAll := index . 0 }}\nenv:\n  - name: TRANSPORT_URL\n    valueFrom:\n      secretKeyRef:\n        name: {{ $envAll.Values.secrets.oslo_messaging.nova }}\n        key: TRANSPORT_URL\n  - name: DB_CONNECTION\n    valueFrom:\n      secretKeyRef:\n        name: {{ $envAll.Values.secrets.oslo_db.nova }}\n        key: DB_CONNECTION\n  - name: DB_CONNECTION_CELL0\n    valueFrom:\n      secretKeyRef:\n        name: {{ $envAll.Values.secrets.oslo_db_cell0.nova }}\n        key: DB_CONNECTION\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $podEnvVars := include \"nova.templates._job_db_sync.env_vars\" (tuple .) | toString | fromYaml }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"nova\" \"podVolMounts\" .Values.pod.mounts.nova_db_sync.nova_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.nova_db_sync.nova_db_sync.volumes \"podEnvVars\" $podEnvVars.env -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbSyncJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . 
| fromYaml) }}\n{{- if .Values.pod.tolerations.nova.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"nova\" -}}\n{{- $_ := set $imageRepoSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.nova.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"nova\" \"serviceTypes\" ( tuple \"compute\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.compute.osapi.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.nova.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"nova\" \"serviceTypes\" ( tuple \"compute\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.compute.osapi.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.nova.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $serviceUsers := (tuple \"nova\" \"neutron\" \"placement\" \"cinder\") -}}\n{{- if .Values.conf.nova.service_user.send_service_user_token }}\n{{- $serviceUsers = append $serviceUsers \"service\" -}}\n{{- end }}\n{{- if .Values.manifests.statefulset_compute_ironic }}\n{{- $serviceUsers = append $serviceUsers \"ironic\" -}}\n{{- end }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"nova\" \"serviceUsers\" $serviceUsers -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.compute.osapi.internal -}}\n{{- end -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.nova.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/job-nova-storage-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_storage_init .Values.conf.ceph.enabled }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"nova-storage-init\" }}\n{{ tuple $envAll \"storage_init\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - secrets\n    verbs:\n      - get\n      - create\n      - update\n      - patch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  annotations:\n    helm.sh/hook: post-install,post-upgrade\n    helm.sh/hook-weight: \"-6\"\n  name: nova-storage-init\n  labels:\n{{ tuple $envAll \"nova\" \"storage-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"storage-init\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll 
\"application\" \"nova\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"storage_init\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        {{- if $envAll.Values.conf.ceph.enabled }}\n        - name: ceph-keyring-placement\n{{ tuple $envAll \"nova_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          securityContext:\n            runAsUser: 0\n          command:\n            - /tmp/ceph-admin-keyring.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: nova-bin\n              mountPath: /tmp/ceph-admin-keyring.sh\n              subPath: ceph-admin-keyring.sh\n              readOnly: true\n            {{- if empty .Values.conf.ceph.admin_keyring }}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{ end }}\n        {{ end }}\n      containers:\n        - name: nova-storage-init\n{{ tuple $envAll \"nova_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.storage_init | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: STORAGE_BACKEND\n              value: {{ .Values.conf.nova.libvirt.images_type | quote }}\n            {{- if eq 
.Values.conf.nova.libvirt.images_type \"rbd\" }}\n            - name: RBD_POOL_NAME\n              value: {{ .Values.conf.nova.libvirt.images_rbd_pool | quote }}\n            - name: RBD_POOL_APP_NAME\n              value: {{ .Values.rbd_pool.app_name | quote }}\n            - name: RBD_POOL_USER\n              value: {{ .Values.conf.nova.libvirt.rbd_user | quote }}\n            - name: RBD_POOL_CRUSH_RULE\n              value: {{ .Values.rbd_pool.crush_rule | quote }}\n            - name: RBD_POOL_REPLICATION\n              value: {{ .Values.rbd_pool.replication | quote }}\n            - name: RBD_POOL_CHUNK_SIZE\n              value: {{ .Values.rbd_pool.chunk_size | quote }}\n            {{ end }}\n          command:\n            - /tmp/storage-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nova-bin\n              mountPath: /tmp/storage-init.sh\n              subPath: storage-init.sh\n              readOnly: true\n            {{- if eq .Values.conf.nova.libvirt.images_type \"rbd\" }}\n            - name: etcceph\n              mountPath: /etc/ceph\n            - name: ceph-etc\n              mountPath: /etc/ceph/ceph.conf\n              subPath: ceph.conf\n              readOnly: true\n            {{- if empty $envAll.Values.conf.ceph.admin_keyring }}\n            - name: ceph-keyring\n              mountPath: /tmp/client-keyring\n              subPath: key\n              readOnly: true\n            {{- end }}\n            {{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: nova-bin\n          configMap:\n            name: nova-bin\n            defaultMode: 0555\n        {{- if $envAll.Values.conf.ceph.enabled }}\n        - name: etcceph\n          emptyDir: {}\n        - name: ceph-etc\n          configMap:\n            name: {{ .Values.ceph_client.configmap }}\n            defaultMode: 0444\n        {{- if empty .Values.conf.ceph.admin_keyring }}\n       
 - name: ceph-keyring\n          secret:\n            secretName: {{ .Values.ceph_client.user_secret_name }}\n        {{- end }}\n        {{- end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"nova\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $rmqUserJob \"tlsSecret\" .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $rmqUserJob \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.nova.enabled -}}\n{{- $_ := set $rmqUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/netpol-nova.yaml",
    "content": "{{/*\nCopyright 2017-2018 The Openstack-Helm Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"nova\" }}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "nova/templates/pdb-metadata.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_metadata }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: nova-api-metadata\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.metadata.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"nova\" \"metadata\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/pdb-osapi.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_osapi }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: nova-api-osapi\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.osapi.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"nova\" \"os-api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/pod-rally-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.pod_rally_test }}\n{{- $envAll := . }}\n\n{{- $mounts_tests := .Values.pod.mounts.nova_tests.nova_tests }}\n{{- $mounts_tests_init := .Values.pod.mounts.nova_tests.init_container }}\n\n{{- $serviceAccountName := print $envAll.deployment_name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ print $envAll.deployment_name \"-test\" }}\n  labels:\n{{ tuple $envAll \"nova\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"nova-test\" \"containerNames\" (list \"init\" \"nova-test\" \"nova-test-ks-user\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 
}}\n{{ end }}\n  restartPolicy: Never\n{{ tuple \"nova_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 2 }}\n{{ tuple \"nova_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n    - name: nova-test-ks-user\n{{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      command:\n        - /tmp/ks-user.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: nova-bin\n          mountPath: /tmp/ks-user.sh\n          subPath: ks-user.sh\n          readOnly: true\n{{ dict \"enabled\" .Values.manifests.certificates \"name\"  .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 8 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_SERVICE_NAME\n          value: \"test\"\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_ROLE\n          value: {{ .Values.endpoints.identity.auth.test.role | quote }}\n  containers:\n    - name: nova-test\n{{ tuple $envAll \"test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" 
.Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates}}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: RALLY_ENV_NAME\n          value: {{.deployment_name}}\n      command:\n        - /tmp/rally-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: nova-etc\n          mountPath: /etc/rally/rally_tests.yaml\n          subPath: rally_tests.yaml\n          readOnly: true\n        - name: nova-bin\n          mountPath: /tmp/rally-test.sh\n          subPath: rally-test.sh\n          readOnly: true\n        - name: rally-db\n          mountPath: /var/lib/rally\n{{ dict \"enabled\" .Values.manifests.certificates \"name\"  .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 8 }}\n{{ if $mounts_tests.volumeMounts }}{{ toYaml $mounts_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: nova-etc\n      secret:\n        secretName: nova-etc\n        defaultMode: 0444\n    - name: nova-bin\n      configMap:\n        name: nova-bin\n        defaultMode: 0555\n    - name: rally-db\n      emptyDir: {}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\"  .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{ if $mounts_tests.volumes }}{{ toYaml $mounts_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/secret-db-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db_api }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"nova\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db_api $userClass }}\n{{- $connection := tuple \"oslo_db_api\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db_api\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{ $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/secret-db-cell0.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db_cell0 }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"nova\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db_cell0 $userClass }}\n{{- $connection := tuple \"oslo_db_cell0\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db_cell0\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{ $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"nova\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{ include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendService\" \"osapi\" \"backendServiceType\" \"compute\" ) }}\n{{ include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendService\" \"novncproxy\" \"backendServiceType\" \"compute_novnc_proxy\" ) }}\n{{- if .Values.manifests.ingress_placement }}\n{{ include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendService\" \"placement\" \"backendServiceType\" \"placement\" ) }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $userClass, $val := $envAll.Values.endpoints.identity.auth }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/secret-ks-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $envAll := . -}}\n{{/* the endpoints.identity.auth sections with the oslo conf sections they get rendered to */}}\n{{- $ksUsers := dict\n  \"nova\" \"keystone_authtoken\"\n  \"neutron\" \"neutron\"\n  \"placement\" \"placement\"\n  \"cinder\" \"cinder\"\n-}}\n{{- if .Values.conf.nova.service_user.send_service_user_token }}\n{{- $_ := set $ksUsers \"service\" \"service_user\" -}}\n{{- end }}\n{{- if .Values.manifests.statefulset_compute_ironic }}\n{{- $_ := set $ksUsers \"ironic\" \"ironic\" -}}\n{{- end }}\n{{ dict\n  \"envAll\" $envAll\n  \"serviceName\" \"nova\"\n  \"serviceUserSections\" $ksUsers\n  | include \"helm-toolkit.manifests.secret_ks_etc\"\n}}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- $rabbitmqProtocol := \"http\" }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- $rabbitmqProtocol = \"https\" }}\n{{- end }}\n{{- range $key1, $userClass := tuple \"admin\" \"nova\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass $rabbitmqProtocol $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n  TRANSPORT_URL: {{ tuple \"oslo_messaging\" \"internal\" $userClass \"amqp\" $envAll | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/secret-ssh.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"nova.configmap.ssh\" }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: nova-ssh\n  annotations:\n{{ tuple \"ssh\" \"keys\" . | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  private-key: {{ .Values.network.ssh.private_key | b64enc }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.network.ssh.public_key \"key\" \"public-key\" \"format\" \"Secret\" ) | indent 2 }}\n\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.secret_ssh }}\n{{- list \"nova-ssh\" . | include \"nova.configmap.ssh\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/service-ingress-metadata.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_metadata .Values.network.metadata.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"compute_metadata\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/service-ingress-novncproxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_novncproxy .Values.network.novncproxy.ingress.public (eq .Values.console.console_kind \"novnc\") }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"compute_novnc_proxy\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/service-ingress-osapi.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_osapi .Values.network.osapi.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"compute\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/service-ingress-serialproxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_serialproxy .Values.network.serialproxy.ingress.public (eq .Values.console.console_kind \"serial\") }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"compute_serial_proxy\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/service-ingress-spiceproxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_spiceproxy .Values.network.spiceproxy.ingress.public (eq .Values.console.console_kind \"spice\") }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"compute_spice_proxy\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/service-metadata.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_metadata }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"compute_metadata\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: n-meta\n    port: {{ .Values.network.metadata.port }}\n    {{ if .Values.network.metadata.node_port.enabled }}\n    nodePort: {{ .Values.network.metadata.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"nova\" \"metadata\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.metadata.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.metadata.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/service-novncproxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_novncproxy ( eq .Values.console.console_kind \"novnc\" ) }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"compute_novnc_proxy\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: n-novnc\n    port: {{ tuple \"compute_novnc_proxy\" \"internal\" \"novnc_proxy\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.novncproxy.node_port.enabled }}\n    nodePort: {{ .Values.network.novncproxy.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"nova\" \"novnc-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.novncproxy.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/service-osapi.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_osapi }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"compute\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: n-api\n    port: {{ tuple \"compute\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.osapi.node_port.enabled }}\n    nodePort: {{ .Values.network.osapi.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"nova\" \"os-api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.osapi.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.osapi.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/service-serialproxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_serialproxy ( eq .Values.console.console_kind \"serial\" ) }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"compute_serial_proxy\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: n-serial\n    port: {{ tuple \"compute_serial_proxy\" \"internal\" \"serial_proxy\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.serialproxy.node_port.enabled }}\n    nodePort: {{ .Values.network.serialproxy.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"nova\" \"serial-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.serialproxy.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/service-spiceproxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_spiceproxy (eq .Values.console.console_kind \"spice\") }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"compute_spice_proxy\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: n-spice\n    port: {{ tuple \"compute_spice_proxy\" \"internal\" \"spice_proxy\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.spiceproxy.node_port.enabled }}\n    nodePort: {{ .Values.network.spiceproxy.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"nova\" \"spice-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.spiceproxy.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/statefulset-compute-ironic.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.statefulset_compute_ironic }}\n{{- $envAll := . }}\n\n{{- $mounts_nova_compute_ironic := .Values.pod.mounts.nova_compute_ironic.nova_compute_ironic }}\n{{- $mounts_nova_compute_ironic_init := .Values.pod.mounts.nova_compute_ironic.init_container }}\n{{- $etcSources := .Values.pod.etcSources.nova_compute_ironic }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"nova-compute-ironic\" }}\n{{ tuple $envAll \"compute_ironic\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: nova-compute-ironic\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"nova\" \"compute-ironic\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.compute_ironic }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"nova\" \"compute-ironic\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  serviceName: \"{{ tuple \"baremetal\" \"internal\" . 
| include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}-compute\"\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"compute-ironic\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"nova-compute-default\" \"containerNames\" (list \"nova-compute\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"nova_compute_ironic\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"nova_compute_ironic\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"nova\" \"compute-ironic\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.agent.compute_ironic.node_selector_key }}: {{ .Values.labels.agent.compute_ironic.node_selector_value }}\n      hostPID: true\n      dnsPolicy: ClusterFirstWithHostNet\n      initContainers:\n{{ tuple $envAll \"compute_ironic\" $mounts_nova_compute_ironic_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: nova-compute-ironic\n{{ tuple $envAll \"nova_compute_ironic\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.compute_ironic | include 
\"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/nova-compute-ironic.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.nova.oslo_concurrency.lock_path }}\n            - name: nova-bin\n              mountPath: /tmp/nova-compute-ironic.sh\n              subPath: nova-compute-ironic.sh\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova-compute.conf\n              subPath: nova-compute.conf\n              readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d/\n              readOnly: true\n            {{- if .Values.conf.nova.DEFAULT.log_config_append }}\n            - name: nova-etc\n              mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: nova-etc\n              mountPath: /etc/nova/nova-ironic.conf\n              subPath: nova-ironic.conf\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: varlibironic\n              mountPath: /var/lib/ironic\n{{ if $mounts_nova_compute_ironic.volumeMounts }}{{ toYaml $mounts_nova_compute_ironic.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: nova-bin\n          configMap:\n            name: nova-bin\n            defaultMode: 0555\n        - name: nova-etc\n          secret:\n            
secretName: nova-etc\n            defaultMode: 0444\n        - name: nova-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: varlibironic\n          hostPath:\n            path: /var/lib/ironic\n{{ if $mounts_nova_compute_ironic.volumes }}{{ toYaml $mounts_nova_compute_ironic.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/statefulset-conductor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"novaConductorLivenessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/nova/nova.conf\n    - --config-dir\n    - /etc/nova/nova.conf.d\n    - --service-queue-name\n    - conductor\n    - --liveness-probe\n{{- end }}\n\n{{- define \"novaConductorReadinessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/nova/nova.conf\n    - --config-dir\n    - /etc/nova/nova.conf.d\n    - --service-queue-name\n    - conductor\n{{- end }}\n\n{{- if .Values.manifests.statefulset_conductor }}\n{{- $envAll := . 
}}\n\n{{- $mounts_nova_conductor := .Values.pod.mounts.nova_conductor.nova_conductor }}\n{{- $mounts_nova_conductor_init := .Values.pod.mounts.nova_conductor.init_container }}\n{{- $etcSources := .Values.pod.etcSources.nova_conductor }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"nova-conductor\" }}\n{{ tuple $envAll \"conductor\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: nova-conductor\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"nova\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: nova-conductor\n  replicas: {{ .Values.pod.replicas.conductor }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"nova\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  podManagementPolicy: Parallel\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_statefulset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"nova_conductor\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"nova-conductor\" \"containerNames\" (list \"nova-conductor\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"nova_conductor\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"nova_conductor\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"nova\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.conductor.node_selector_key }}: {{ .Values.labels.conductor.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"conductor\" $mounts_nova_conductor_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: nova-conductor\n{{ tuple $envAll \"nova_conductor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.conductor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_conductor\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"conductor\" \"container\" \"default\" \"type\" \"liveness\" \"probeTemplate\" (include \"novaConductorLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | 
indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"conductor\" \"container\" \"default\" \"type\" \"readiness\" \"probeTemplate\" (include \"novaConductorReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          env:\n            - name: RPC_PROBE_TIMEOUT\n              value: \"{{ .Values.pod.probes.rpc_timeout }}\"\n            - name: RPC_PROBE_RETRIES\n              value: \"{{ .Values.pod.probes.rpc_retries }}\"\n            - name: HOSTNAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/nova/certs/ca.crt\"\n{{- end }}\n          command:\n            - /bin/bash\n            - -c\n            - |\n              set -x\n{{- if empty .Values.conf.nova.DEFAULT.host }}\n              # When using StatefulSet, use the pod hostname for stable service names\n              cat > /tmp/nova-conductor-host.conf << EOF\n              [DEFAULT]\n              host = ${HOSTNAME}\n              EOF\n              exec nova-conductor --config-file /etc/nova/nova.conf --config-file /tmp/nova-conductor-host.conf --config-dir /etc/nova/nova.conf.d\n{{- else }}\n              exec /tmp/nova-conductor.sh\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.nova.oslo_concurrency.lock_path }}\n            - name: nova-bin\n              mountPath: /tmp/nova-conductor.sh\n              subPath: nova-conductor.sh\n              readOnly: true\n            - name: nova-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova.conf\n              
readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d/\n              readOnly: true\n            {{- if .Values.conf.nova.DEFAULT.log_config_append }}\n            - name: nova-etc\n              mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: nova-etc\n              mountPath: /etc/nova/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal \"path\" \"/etc/nova/certs\" \"certs\" (tuple \"ca.crt\") | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_nova_conductor.volumeMounts }}{{ toYaml $mounts_nova_conductor.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: nova-bin\n          configMap:\n            name: nova-bin\n            defaultMode: 0555\n        - name: nova-etc\n          secret:\n            secretName: nova-etc\n            defaultMode: 0444\n        - name: nova-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" (or 
.Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_nova_conductor.volumes }}{{ toYaml $mounts_nova_conductor.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/templates/statefulset-scheduler.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"novaSchedulerLivenessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/nova/nova.conf\n    - --config-dir\n    - /etc/nova/nova.conf.d\n    - --service-queue-name\n    - scheduler\n    - --liveness-probe\n{{- end }}\n\n{{- define \"novaSchedulerReadinessProbeTemplate\" }}\nexec:\n  command:\n    - python\n    - /tmp/health-probe.py\n    - --config-file\n    - /etc/nova/nova.conf\n    - --config-dir\n    - /etc/nova/nova.conf.d\n    - --service-queue-name\n    - scheduler\n{{- end }}\n\n{{- if .Values.manifests.statefulset_scheduler }}\n{{- $envAll := . 
}}\n\n{{- $mounts_nova_scheduler := .Values.pod.mounts.nova_scheduler.nova_scheduler }}\n{{- $mounts_nova_scheduler_init := .Values.pod.mounts.nova_scheduler.init_container }}\n{{- $etcSources := .Values.pod.etcSources.nova_scheduler }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"nova-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"nova-scheduler\" }}\n{{ tuple $envAll \"scheduler\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: nova-scheduler\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"nova\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: nova-scheduler\n  replicas: {{ .Values.pod.replicas.scheduler }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"nova\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  podManagementPolicy: Parallel\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_statefulset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"nova\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"nova_scheduler\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"nova-scheduler\" \"containerNames\" (list \"nova-scheduler\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"nova_scheduler\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"nova_scheduler\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"nova\" \"scheduler\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.scheduler.node_selector_key }}: {{ .Values.labels.scheduler.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.nova.enabled }}\n{{ tuple $envAll \"nova\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      initContainers:\n{{ tuple $envAll \"scheduler\" $mounts_nova_scheduler_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: nova-scheduler\n{{ tuple $envAll \"nova_scheduler\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.scheduler | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"nova\" \"container\" \"nova_scheduler\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"scheduler\" \"container\" \"default\" \"type\" \"liveness\" \"probeTemplate\" (include \"novaSchedulerLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | 
indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"scheduler\" \"container\" \"default\" \"type\" \"readiness\" \"probeTemplate\" (include \"novaSchedulerReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          env:\n            - name: RPC_PROBE_TIMEOUT\n              value: \"{{ .Values.pod.probes.rpc_timeout }}\"\n            - name: RPC_PROBE_RETRIES\n              value: \"{{ .Values.pod.probes.rpc_retries }}\"\n            - name: HOSTNAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/nova/certs/ca.crt\"\n{{- end }}\n          command:\n            - /bin/bash\n            - -c\n            - |\n              set -xe\n{{- if empty .Values.conf.nova.DEFAULT.host }}\n              # When using StatefulSet, use the pod hostname for stable service names\n              cat > /tmp/nova-scheduler-host.conf << EOF\n              [DEFAULT]\n              host = ${HOSTNAME}\n              EOF\n              exec nova-scheduler --config-file /etc/nova/nova.conf --config-file /tmp/nova-scheduler-host.conf --config-dir /etc/nova/nova.conf.d\n{{- else }}\n              exec /tmp/nova-scheduler.sh\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.nova.oslo_concurrency.lock_path }}\n            - name: nova-bin\n              mountPath: /tmp/nova-scheduler.sh\n              subPath: nova-scheduler.sh\n              readOnly: true\n            - name: nova-bin\n              mountPath: /tmp/health-probe.py\n              subPath: health-probe.py\n              readOnly: true\n            - name: nova-etc\n              mountPath: /etc/nova/nova.conf\n              subPath: nova.conf\n              
readOnly: true\n            - name: nova-etc-snippets\n              mountPath: /etc/nova/nova.conf.d/\n              readOnly: true\n            {{- if .Values.conf.nova.DEFAULT.log_config_append }}\n            - name: nova-etc\n              mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: nova-etc\n              mountPath: /etc/nova/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal \"path\" \"/etc/nova/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_nova_scheduler.volumeMounts }}{{ toYaml $mounts_nova_scheduler.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: nova-bin\n          configMap:\n            name: nova-bin\n            defaultMode: 0555\n        - name: nova-etc\n          secret:\n            secretName: nova-etc\n            defaultMode: 0444\n        - name: nova-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" 
.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.compute.osapi.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_nova_scheduler.volumes }}{{ toYaml $mounts_nova_scheduler.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "nova/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for nova.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nlabels:\n  agent:\n    compute:\n      node_selector_key: openstack-compute-node\n      node_selector_value: enabled\n    compute_ironic:\n      node_selector_key: openstack-compute-node\n      node_selector_value: enabled\n  api_metadata:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  conductor:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  novncproxy:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  osapi:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  scheduler:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  serialproxy:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  spiceproxy:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: 
quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy'\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    nova_archive_deleted_rows: quay.io/airshipit/nova:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    nova_api: quay.io/airshipit/nova:2025.1-ubuntu_noble\n    nova_cell_setup: quay.io/airshipit/nova:2025.1-ubuntu_noble\n    nova_cell_setup_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    nova_compute: quay.io/airshipit/nova:2025.1-ubuntu_noble\n    nova_compute_ironic: quay.io/airshipit/nova:2025.1-ubuntu_noble\n    nova_compute_ssh: quay.io/airshipit/nova:2025.1-ubuntu_noble\n    nova_conductor: quay.io/airshipit/nova:2025.1-ubuntu_noble\n    nova_db_sync: quay.io/airshipit/nova:2025.1-ubuntu_noble\n    nova_novncproxy: quay.io/airshipit/nova:2025.1-ubuntu_noble\n    nova_novncproxy_assets: quay.io/airshipit/nova:2025.1-ubuntu_noble\n    nova_scheduler: quay.io/airshipit/nova:2025.1-ubuntu_noble\n    nova_storage_init: quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\n    # NOTE(portdirect): we simply use the ceph config helper here,\n    # as it has both oscli and jq.\n    nova_service_cleaner: quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\n    nova_serialproxy: quay.io/airshipit/nova:2025.1-ubuntu_noble\n    nova_spiceproxy: quay.io/airshipit/nova:2025.1-ubuntu_noble\n    nova_spiceproxy_assets: quay.io/airshipit/nova:2025.1-ubuntu_noble\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    image_repo_sync: docker.io/docker:17.07.0\n    nova_wait_for_computes_init: quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\n  local_registry:\n    active: false\n    exclude:\n      - 
dep_check\n      - image_repo_sync\n\njobs:\n  # NOTE(portdirect): When using cells new nodes will be added to the cell on the hour by default.\n  # TODO(portdirect): Add a post-start action to nova compute pods that registers themselves.\n  cell_setup:\n    cron: \"0 */1 * * *\"\n    starting_deadline: 600\n    history:\n      success: 3\n      failed: 1\n    extended_wait:\n      enabled: false\n      iteration: 3\n      duration: 5\n    extra_command: null\n  service_cleaner:\n    cron: \"0 */1 * * *\"\n    starting_deadline: 600\n    history:\n      success: 3\n      failed: 1\n    sleep_time: 60\n    extra_command: null\n  archive_deleted_rows:\n    cron: \"0 */1 * * *\"\n    starting_deadline: 600\n    history:\n      success: 3\n      failed: 1\n\nbootstrap:\n  enabled: true\n  ks_user: admin\n  script: null\n  structured:\n    flavors:\n      enabled: true\n      options:\n        m1_tiny:\n          name: \"m1.tiny\"\n          ram: 512\n          disk: 1\n          vcpus: 1\n        m1_small:\n          name: \"m1.small\"\n          ram: 2048\n          disk: 20\n          vcpus: 1\n        m1_medium:\n          name: \"m1.medium\"\n          ram: 4096\n          disk: 40\n          vcpus: 2\n        m1_large:\n          name: \"m1.large\"\n          ram: 8192\n          disk: 80\n          vcpus: 4\n        m1_xlarge:\n          name: \"m1.xlarge\"\n          ram: 16384\n          disk: 160\n          vcpus: 8\n  wait_for_computes:\n    enabled: false\n    # Wait percentage is the minimum percentage of compute hypervisors which\n    # must be available before the remainder of the bootstrap script can be run.\n    wait_percentage: 70\n    # Once the wait_percentage above is achieved, the remaining_wait is the\n    # amount of time in seconds to wait before executing the remainder of the\n    # bootstrap script.\n    remaining_wait: 300\n    scripts:\n      init_script: |\n        # This runs in a bootstrap init container. 
It counts the number of compute nodes.\n        COMPUTE_NODES=$(kubectl get nodes -o custom-columns=NAME:.metadata.name -l openstack-compute-node=enabled --no-headers | sort)\n        /bin/echo $COMPUTE_NODES > /tmp/compute_nodes.txt\n      wait_script: |\n        # This script runs in the main bootstrap container just before the\n        # bootstrap.script is called.\n        COMPUTE_HOSTS=`cat /tmp/compute_nodes.txt | wc -w`\n        if [[ $COMPUTE_HOSTS == 0 ]]; then\n          echo \"There are no compute hosts found!\"\n          exit 1\n        fi\n\n        # Wait for all hypervisors to come up before moving on with the deployment\n        HYPERVISOR_WAIT=true\n        WAIT_AFTER_READY=0\n        SLEEP=5\n        while [[ $HYPERVISOR_WAIT == true ]]; do\n          date '+%Y-%m-%d %H:%M:%S.%3N'\n          # It's possible that openstack command may fail due to not being able to\n          # reach the compute service\n          set +e\n          HYPERVISORS=$(openstack hypervisor list -f value -c 'Hypervisor Hostname' | wc -w)\n          set -e\n\n          PERCENT_READY=$(( $HYPERVISORS * 100 / $COMPUTE_HOSTS ))\n          if [[ $PERCENT_READY -ge $WAIT_PERCENTAGE ]]; then\n            echo \"Hypervisor ready percentage is $PERCENT_READY\"\n            if [[ $PERCENT_READY == 100 ]]; then\n              HYPERVISOR_WAIT=false\n              echo \"All hypervisors are ready.\"\n            elif [[ $WAIT_AFTER_READY -ge $REMAINING_WAIT ]]; then\n              HYPERVISOR_WAIT=false\n              echo \"Waited the configured time -- $HYPERVISORS out of $COMPUTE_HOSTS hypervisor(s) ready -- proceeding with the bootstrap.\"\n            else\n              sleep $SLEEP\n              WAIT_AFTER_READY=$(( $WAIT_AFTER_READY + $SLEEP ))\n            fi\n          else\n            echo \"Waiting $SLEEP seconds for enough hypervisors to be discovered...\"\n            sleep $SLEEP\n          fi\n        done\n\nnetwork:\n  # provide what type of network wiring will be 
used\n  # possible options: openvswitch, linuxbridge, sriov\n  backend:\n    - openvswitch\n  osapi:\n    port: 8774\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30774\n  metadata:\n    port: 8775\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30775\n  novncproxy:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    node_port:\n      enabled: false\n      port: 30680\n  serialproxy:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    node_port:\n      enabled: false\n      port: 30683\n  spiceproxy:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    node_port:\n      enabled: false\n      port: 30682\n  ssh:\n    enabled: false\n    port: 8022\n    from_subnet: 0.0.0.0/0\n    key_types:\n      - rsa\n      - dsa\n      - ecdsa\n      - ed25519\n    private_key: 'null'\n    public_key: 'null'\n\ndependencies:\n  dynamic:\n    common:\n      
local_image_registry:\n        jobs:\n          - nova-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n    targeted:\n      ovn:\n        compute:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: ovn\n                component: ovn-controller\n      openvswitch:\n        compute:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: neutron\n                component: neutron-ovs-agent\n      linuxbridge:\n        compute:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: neutron\n                component: neutron-lb-agent\n      sriov:\n        compute:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: neutron\n                component: neutron-sriov-agent\n  static:\n    api:\n      jobs:\n        - nova-db-sync\n        - nova-ks-user\n        - nova-ks-endpoints\n        - nova-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    api_metadata:\n      jobs:\n        - nova-db-sync\n        - nova-ks-user\n        - nova-ks-endpoints\n        - nova-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    bootstrap:\n      jobs:\n        - nova-cell-setup\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: compute\n    cell_setup:\n      jobs:\n        - nova-db-sync\n        - nova-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: 
internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: compute\n      pod:\n        - requireSameNode: false\n          labels:\n            application: nova\n            component: compute\n    service_cleaner:\n      jobs:\n        - nova-db-sync\n        - nova-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: compute\n    compute:\n      pod:\n        - requireSameNode: true\n          labels:\n            application: libvirt\n            component: libvirt\n      jobs:\n        - nova-db-sync\n        - nova-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: image\n        - endpoint: internal\n          service: compute\n        - endpoint: internal\n          service: network\n        - endpoint: internal\n          service: compute_metadata\n    compute_ironic:\n      jobs:\n        - nova-db-sync\n        - nova-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: image\n        - endpoint: internal\n          service: compute\n        - endpoint: internal\n          service: network\n        - endpoint: internal\n          service: baremetal\n    conductor:\n      jobs:\n        - nova-db-sync\n        - nova-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: compute\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    
archive_deleted_rows:\n      jobs:\n        - nova-db-init\n        - nova-db-sync\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - nova-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    ks_endpoints:\n      jobs:\n        - nova-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n        - service: oslo_messaging\n          endpoint: internal\n    novncproxy:\n      jobs:\n        - nova-db-sync\n      services:\n        - endpoint: internal\n          service: oslo_db\n    serialproxy:\n      jobs:\n        - nova-db-sync\n      services:\n        - endpoint: internal\n          service: oslo_db\n    spiceproxy:\n      jobs:\n        - nova-db-sync\n      services:\n        - endpoint: internal\n          service: oslo_db\n    scheduler:\n      jobs:\n        - nova-db-sync\n        - nova-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: compute\n    tests:\n      services:\n        - endpoint: internal\n          service: image\n        - endpoint: internal\n          service: compute\n        - endpoint: internal\n          service: network\n        - endpoint: internal\n          service: compute_metadata\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nconsole:\n  # serial | spice | novnc | none\n  console_kind: novnc\n  serial:\n    compute:\n      # IF blank, search default routing interface\n      
server_proxyclient_interface: null\n      # or set network cidr\n      server_proxyclient_network_cidr: 0/0\n    proxy:\n      # IF blank, search default routing interface\n      server_proxyclient_interface: null\n      # or set network cidr\n      server_proxyclient_network_cidr: 0/0\n  spice:\n    compute:\n      # IF blank, search default routing interface\n      server_proxyclient_interface: null\n      # or set network cidr\n      server_proxyclient_network_cidr: 0/0\n    proxy:\n      # IF blank, search default routing interface\n      server_proxyclient_interface: null\n      # or set network cidr\n      server_proxyclient_network_cidr: 0/0\n  novnc:\n    compute:\n      # IF blank, search default routing interface\n      vncserver_proxyclient_interface: null\n      # or set network cidr\n      vncserver_proxyclient_network_cidr: 0/0\n    vncproxy:\n      # IF blank, search default routing interface\n      vncserver_proxyclient_interface: null\n      # or set network cidr\n      vncserver_proxyclient_network_cidr: 0/0\n  address_search_enabled: true\n\nceph_client:\n  configmap: ceph-etc\n  user_secret_name: pvc-ceph-client-key\n\nrbd_pool:\n  app_name: nova-vms\n  replication: 3\n  crush_rule: replicated_rule\n  chunk_size: 8\n\nconf:\n  security: |\n    #\n    # Disable access to the entire file system except for the directories that\n    # are explicitly allowed later.\n    #\n    # This currently breaks the configurations that come with some web application\n    # Debian packages.\n    #\n    #<Directory />\n    #   AllowOverride None\n    #   Require all denied\n    #</Directory>\n\n    # Changing the following options will not really affect the security of the\n    # server, but might make attacks slightly more difficult in some cases.\n\n    #\n    # ServerTokens\n    # This directive configures what you return as the Server HTTP response\n    # Header. 
The default is 'Full' which sends information about the OS-Type\n    # and compiled in modules.\n    # Set to one of:  Full | OS | Minimal | Minor | Major | Prod\n    # where Full conveys the most information, and Prod the least.\n    ServerTokens Prod\n\n    #\n    # Optionally add a line containing the server version and virtual host\n    # name to server-generated pages (internal error documents, FTP directory\n    # listings, mod_status and mod_info output etc., but not CGI generated\n    # documents or custom error documents).\n    # Set to \"EMail\" to also include a mailto: link to the ServerAdmin.\n    # Set to one of:  On | Off | EMail\n    ServerSignature Off\n\n    #\n    # Allow TRACE method\n    #\n    # Set to \"extended\" to also reflect the request body (only for testing and\n    # diagnostic purposes).\n    #\n    # Set to one of:  On | Off | extended\n    TraceEnable Off\n\n    #\n    # Forbid access to version control directories\n    #\n    # If you use version control systems in your document root, you should\n    # probably deny access to their directories. For example, for subversion:\n    #\n    #<DirectoryMatch \"/\\.svn\">\n    #   Require all denied\n    #</DirectoryMatch>\n\n    #\n    # Setting this header will prevent MSIE from interpreting files as something\n    # else than declared by the content type in the HTTP headers.\n    # Requires mod_headers to be enabled.\n    #\n    #Header set X-Content-Type-Options: \"nosniff\"\n\n    #\n    # Setting this header will prevent other sites from embedding pages from this\n    # site as frames. 
This defends against clickjacking attacks.\n    # Requires mod_headers to be enabled.\n    #\n    #Header set X-Frame-Options: \"sameorigin\"\n  software:\n    apache2:\n      binary: apache2\n      start_parameters: -DFOREGROUND\n      conf_dir: /etc/apache2/conf-enabled\n      site_dir: /etc/apache2/sites-enable\n      mods_dir: /etc/apache2/mods-available\n      a2enmod: null\n      a2dismod: null\n  ceph:\n    enabled: true\n    admin_keyring: null\n    cinder:\n      user: \"cinder\"\n      keyring: null\n      secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337\n  rally_tests:\n    run_tempest: false\n    clean_up: |\n      FLAVORS=$(openstack flavor list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')\n      if [ -n \"$FLAVORS\" ]; then\n        echo $FLAVORS | xargs openstack flavor delete\n      fi\n      SERVERS=$(openstack server list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')\n      if [ -n \"$SERVERS\" ]; then\n        echo $SERVERS | xargs openstack server delete\n      fi\n      IMAGES=$(openstack image list -f value | awk '$2 ~ /^c_rally_/ { print $1 }')\n      if [ -n \"$IMAGES\" ]; then\n        echo $IMAGES | xargs openstack image delete\n      fi\n    tests:\n      NovaAggregates.create_and_get_aggregate_details:\n        - args:\n            availability_zone: nova\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaAggregates.create_and_update_aggregate:\n        - args:\n            availability_zone: nova\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaAggregates.list_aggregates:\n        - runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      
NovaAvailabilityZones.list_availability_zones:\n        - args:\n            detailed: true\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaFlavors.create_and_delete_flavor:\n        - args:\n            disk: 1\n            ram: 500\n            vcpus: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaFlavors.create_and_list_flavor_access:\n        - args:\n            disk: 1\n            ram: 500\n            vcpus: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaFlavors.create_flavor:\n        - args:\n            disk: 1\n            ram: 500\n            vcpus: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaFlavors.create_flavor_and_add_tenant_access:\n        - args:\n            disk: 1\n            ram: 500\n            vcpus: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaFlavors.create_flavor_and_set_keys:\n        - args:\n            disk: 1\n            extra_specs:\n              'quota:disk_read_bytes_sec': 10240\n            ram: 500\n            vcpus: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaFlavors.list_flavors:\n        - args:\n            detailed: true\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            
failure_rate:\n              max: 0\n      NovaHypervisors.list_and_get_hypervisors:\n        - args:\n            detailed: true\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaHypervisors.list_and_get_uptime_hypervisors:\n        - args:\n            detailed: true\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaHypervisors.list_and_search_hypervisors:\n        - args:\n            detailed: true\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaHypervisors.list_hypervisors:\n        - args:\n            detailed: true\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaHypervisors.statistics_hypervisors:\n        - args: {}\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaKeypair.create_and_delete_keypair:\n        - runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaKeypair.create_and_list_keypairs:\n        - runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaServerGroups.create_and_list_server_groups:\n        - args:\n            all_projects: false\n            kwargs:\n              policies:\n                - affinity\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n 
           failure_rate:\n              max: 0\n      NovaServices.list_services:\n        - runner:\n            concurrency: 1\n            times: 1\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n  paste:\n    composite:metadata:\n      use: egg:Paste#urlmap\n      /: meta\n    pipeline:meta:\n      pipeline: cors metaapp\n    app:metaapp:\n      paste.app_factory: nova.api.metadata.handler:MetadataRequestHandler.factory\n    composite:osapi_compute:\n      use: call:nova.api.openstack.urlmap:urlmap_factory\n      /: oscomputeversions\n      /v2: openstack_compute_api_v21_legacy_v2_compatible\n      /v2.1: openstack_compute_api_v21\n    composite:openstack_compute_api_v21:\n      use: call:nova.api.auth:pipeline_factory_v21\n      noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21\n      keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext osapi_compute_app_v21\n    composite:openstack_compute_api_v21_legacy_v2_compatible:\n      use: call:nova.api.auth:pipeline_factory_v21\n      noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21\n      keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext legacy_v2_compatible osapi_compute_app_v21\n    filter:request_id:\n      paste.filter_factory: oslo_middleware:RequestId.factory\n    filter:compute_req_id:\n      paste.filter_factory: nova.api.compute_req_id:ComputeReqIdMiddleware.factory\n    filter:faultwrap:\n      paste.filter_factory: nova.api.openstack:FaultWrapper.factory\n    filter:noauth2:\n      paste.filter_factory: nova.api.openstack.auth:NoAuthMiddleware.factory\n    filter:sizelimit:\n      paste.filter_factory: oslo_middleware:RequestBodySizeLimiter.factory\n    filter:http_proxy_to_wsgi:\n      paste.filter_factory: 
oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory\n    filter:legacy_v2_compatible:\n      paste.filter_factory: nova.api.openstack:LegacyV2CompatibleWrapper.factory\n    app:osapi_compute_app_v21:\n      paste.app_factory: nova.api.openstack.compute:APIRouterV21.factory\n    pipeline:oscomputeversions:\n      pipeline: faultwrap http_proxy_to_wsgi oscomputeversionapp\n    app:oscomputeversionapp:\n      paste.app_factory: nova.api.openstack.compute.versions:Versions.factory\n    filter:cors:\n      paste.filter_factory: oslo_middleware.cors:filter_factory\n      oslo_config_project: nova\n    filter:keystonecontext:\n      paste.filter_factory: nova.api.auth:NovaKeystoneContext.factory\n    filter:authtoken:\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n    filter:audit:\n      paste.filter_factory: keystonemiddleware.audit:filter_factory\n      audit_map_file: /etc/nova/api_audit_map.conf\n  policy: {}\n  nova_sudoers: |\n    # This sudoers file supports rootwrap for both Kolla and LOCI Images.\n    Defaults !requiretty\n    Defaults secure_path=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin\"\n    nova ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/nova-rootwrap /etc/nova/rootwrap.conf *, /var/lib/openstack/bin/nova-rootwrap /etc/nova/rootwrap.conf *\n  api_audit_map:\n    DEFAULT:\n      target_endpoint_type: None\n    custom_actions:\n      enable: enable\n      disable: disable\n      delete: delete\n      startup: start/startup\n      shutdown: stop/shutdown\n      reboot: start/reboot\n      os-migrations/get: read\n      os-server-password/post: update\n    path_keywords:\n      add: None\n      action: None\n      enable: None\n      disable: None\n      configure-project: None\n      defaults: None\n      delete: None\n      detail: None\n      diagnostics: None\n      entries: entry\n      extensions: alias\n      flavors: flavor\n      images: 
image\n      ips: label\n      limits: None\n      metadata: key\n      os-agents: os-agent\n      os-aggregates: os-aggregate\n      os-availability-zone: None\n      os-certificates: None\n      os-cloudpipe: None\n      os-fixed-ips: ip\n      os-extra_specs: key\n      os-flavor-access: None\n      os-floating-ip-dns: domain\n      os-floating-ips-bulk: host\n      os-floating-ip-pools: None\n      os-floating-ips: floating-ip\n      os-hosts: host\n      os-hypervisors: hypervisor\n      os-instance-actions: instance-action\n      os-keypairs: keypair\n      os-migrations: None\n      os-networks: network\n      os-quota-sets: tenant\n      os-security-groups: security_group\n      os-security-group-rules: rule\n      os-server-password: None\n      os-services: None\n      os-simple-tenant-usage: tenant\n      os-virtual-interfaces: None\n      os-volume_attachments: attachment\n      os-volumes_boot: None\n      os-volumes: volume\n      os-volume-types: volume-type\n      os-snapshots: snapshot\n      reboot: None\n      servers: server\n      shutdown: None\n      startup: None\n      statistics: None\n    service_endpoints:\n      compute: service/compute\n  rootwrap: |\n    # Configuration for nova-rootwrap\n    # This file should be owned by (and only-writeable by) the root user\n\n    [DEFAULT]\n    # List of directories to load filter definitions from (separated by ',').\n    # These directories MUST all be only writeable by root !\n    filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap\n\n    # List of directories to search executables in, in case filters do not\n    # explicitely specify a full path (separated by ',')\n    # If not specified, defaults to system PATH environment variable.\n    # These directories MUST all be only writeable by root !\n    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin\n\n    # Enable logging to syslog\n    # Default value is False\n    
use_syslog=False\n\n    # Which syslog facility to use.\n    # Valid values include auth, authpriv, syslog, local0, local1...\n    # Default value is 'syslog'\n    syslog_log_facility=syslog\n\n    # Which messages to log.\n    # INFO means log all usage\n    # ERROR means only log unsuccessful attempts\n    syslog_log_level=ERROR\n  rootwrap_filters:\n    api_metadata:\n      pods:\n        - metadata\n      content: |\n        # nova-rootwrap command filters for api-metadata nodes\n        # This is needed on nova-api hosts running with \"metadata\" in enabled_apis\n        # or when running nova-api-metadata\n        # This file should be owned by (and only-writeable by) the root user\n\n        [Filters]\n        # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...\n        iptables-save: CommandFilter, iptables-save, root\n        ip6tables-save: CommandFilter, ip6tables-save, root\n\n        # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)\n        iptables-restore: CommandFilter, iptables-restore, root\n        ip6tables-restore: CommandFilter, ip6tables-restore, root\n    compute:\n      pods:\n        - compute\n      content: |\n        # nova-rootwrap command filters for compute nodes\n        # This file should be owned by (and only-writeable by) the root user\n\n        [Filters]\n        # nova/virt/disk/mount/api.py: 'kpartx', '-a', device\n        # nova/virt/disk/mount/api.py: 'kpartx', '-d', device\n        kpartx: CommandFilter, kpartx, root\n\n        # nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path\n        # nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path\n        tune2fs: CommandFilter, tune2fs, root\n\n        # nova/virt/disk/mount/api.py: 'mount', mapped_device\n        # nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target\n        # nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..\n        # nova/virt/configdrive.py: 'mount', device, mountdir\n        # 
nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ...\n        mount: CommandFilter, mount, root\n\n        # nova/virt/disk/mount/api.py: 'umount', mapped_device\n        # nova/virt/disk/api.py: 'umount' target\n        # nova/virt/xenapi/vm_utils.py: 'umount', dev_path\n        # nova/virt/configdrive.py: 'umount', mountdir\n        umount: CommandFilter, umount, root\n\n        # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image\n        # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device\n        qemu-nbd: CommandFilter, qemu-nbd, root\n\n        # nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image\n        # nova/virt/disk/mount/loop.py: 'losetup', '--detach', device\n        losetup: CommandFilter, losetup, root\n\n        # nova/virt/disk/vfs/localfs.py: 'blkid', '-o', 'value', '-s', 'TYPE', device\n        blkid: CommandFilter, blkid, root\n\n        # nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path\n        # nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device\n        blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.*\n\n        # nova/virt/disk/vfs/localfs.py: 'tee', canonpath\n        tee: CommandFilter, tee, root\n\n        # nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath\n        mkdir: CommandFilter, mkdir, root\n\n        # nova/virt/disk/vfs/localfs.py: 'chown'\n        # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log\n        # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log\n        # nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk')\n        chown: CommandFilter, chown, root\n\n        # nova/virt/disk/vfs/localfs.py: 'chmod'\n        chmod: CommandFilter, chmod, root\n\n        # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'\n        # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'\n        # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev\n        # 
nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..\n        # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..\n        # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..\n        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..\n        # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)\n        # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]\n        # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge\n        # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..\n        # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..\n        # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...\n        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..\n        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'\n        # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'\n        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..\n        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..\n        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'\n        # nova/network/linux_net.py: 'ip', 'route', 'add', ..\n        # nova/network/linux_net.py: 'ip', 'route', 'del', .\n        # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev\n        ip: CommandFilter, ip, root\n\n        # nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev\n        # nova/network/linux_net.py: 'tunctl', '-b', '-t', dev\n        tunctl: CommandFilter, tunctl, root\n\n        # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...\n        # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...\n        # nova/network/linux_net.py: 'ovs-vsctl', ....\n        ovs-vsctl: CommandFilter, ovs-vsctl, root\n\n        # nova/virt/libvirt/vif.py: 'vrouter-port-control', ...\n        vrouter-port-control: 
CommandFilter, vrouter-port-control, root\n\n        # nova/virt/libvirt/vif.py: 'ebrctl', ...\n        ebrctl: CommandFilter, ebrctl, root\n\n        # nova/virt/libvirt/vif.py: 'mm-ctl', ...\n        mm-ctl: CommandFilter, mm-ctl, root\n\n        # nova/network/linux_net.py: 'ovs-ofctl', ....\n        ovs-ofctl: CommandFilter, ovs-ofctl, root\n\n        # nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ...\n        dd: CommandFilter, dd, root\n\n        # nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...\n        iscsiadm: CommandFilter, iscsiadm, root\n\n        # nova/virt/libvirt/volume/aoe.py: 'aoe-revalidate', aoedev\n        # nova/virt/libvirt/volume/aoe.py: 'aoe-discover'\n        aoe-revalidate: CommandFilter, aoe-revalidate, root\n        aoe-discover: CommandFilter, aoe-discover, root\n\n        # nova/virt/xenapi/vm_utils.py: parted, --script, ...\n        # nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*.\n        parted: CommandFilter, parted, root\n\n        # nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path\n        pygrub: CommandFilter, pygrub, root\n\n        # nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s\n        fdisk: CommandFilter, fdisk, root\n\n        # nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path\n        # nova/virt/disk/api.py: e2fsck, -f, -p, image\n        e2fsck: CommandFilter, e2fsck, root\n\n        # nova/virt/xenapi/vm_utils.py: resize2fs, partition_path\n        # nova/virt/disk/api.py: resize2fs, image\n        resize2fs: CommandFilter, resize2fs, root\n\n        # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...\n        iptables-save: CommandFilter, iptables-save, root\n        ip6tables-save: CommandFilter, ip6tables-save, root\n\n        # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)\n        iptables-restore: CommandFilter, iptables-restore, root\n        ip6tables-restore: CommandFilter, ip6tables-restore, root\n\n        # 
nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...\n        # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..\n        arping: CommandFilter, arping, root\n\n        # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address\n        dhcp_release: CommandFilter, dhcp_release, root\n\n        # nova/network/linux_net.py: 'kill', '-9', pid\n        # nova/network/linux_net.py: 'kill', '-HUP', pid\n        kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP\n\n        # nova/network/linux_net.py: 'kill', pid\n        kill_radvd: KillFilter, root, /usr/sbin/radvd\n\n        # nova/network/linux_net.py: dnsmasq call\n        dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq\n\n        # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..\n        radvd: CommandFilter, radvd, root\n\n        # nova/network/linux_net.py: 'brctl', 'addbr', bridge\n        # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0\n        # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'\n        # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface\n        brctl: CommandFilter, brctl, root\n\n        # nova/virt/libvirt/utils.py: 'mkswap'\n        # nova/virt/xenapi/vm_utils.py: 'mkswap'\n        mkswap: CommandFilter, mkswap, root\n\n        # nova/virt/libvirt/utils.py: 'nova-idmapshift'\n        nova-idmapshift: CommandFilter, nova-idmapshift, root\n\n        # nova/virt/xenapi/vm_utils.py: 'mkfs'\n        # nova/utils.py: 'mkfs', fs, path, label\n        mkfs: CommandFilter, mkfs, root\n\n        # nova/virt/libvirt/utils.py: 'qemu-img'\n        qemu-img: CommandFilter, qemu-img, root\n\n        # nova/virt/disk/vfs/localfs.py: 'readlink', '-e'\n        readlink: CommandFilter, readlink, root\n\n        # nova/virt/disk/api.py:\n        mkfs.ext3: CommandFilter, mkfs.ext3, root\n        mkfs.ext4: CommandFilter, mkfs.ext4, root\n        mkfs.ntfs: CommandFilter, 
mkfs.ntfs, root\n\n        # nova/virt/libvirt/connection.py:\n        lvremove: CommandFilter, lvremove, root\n\n        # nova/virt/libvirt/utils.py:\n        lvcreate: CommandFilter, lvcreate, root\n\n        # nova/virt/libvirt/utils.py:\n        lvs: CommandFilter, lvs, root\n\n        # nova/virt/libvirt/utils.py:\n        vgs: CommandFilter, vgs, root\n\n        # nova/utils.py:read_file_as_root: 'cat', file_path\n        # (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file)\n        read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd\n        read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow\n\n        # os-brick needed commands\n        read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi\n        multipath: CommandFilter, multipath, root\n        # multipathd show status\n        multipathd: CommandFilter, multipathd, root\n        systool: CommandFilter, systool, root\n        vgc-cluster: CommandFilter, vgc-cluster, root\n        # os_brick/initiator/connector.py\n        drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid\n\n        # TODO(smcginnis) Temporary fix.\n        # Need to pull in os-brick os-brick.filters file instead and clean\n        # out stale brick values from this file.\n        scsi_id: CommandFilter, /lib/udev/scsi_id, root\n        # os_brick.privileged.default oslo.privsep context\n        # This line ties the superuser privs with the config files, context name,\n        # and (implicitly) the actual python code invoked.\n        privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\\.\\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*\n\n        # nova/storage/linuxscsi.py: sg_scan device\n        sg_scan: CommandFilter, sg_scan, root\n\n        # 
nova/volume/encryptors/cryptsetup.py:\n        # nova/volume/encryptors/luks.py:\n        ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/crypt-.+, .+\n\n        # nova/volume/encryptors.py:\n        # nova/virt/libvirt/dmcrypt.py:\n        cryptsetup: CommandFilter, cryptsetup, root\n\n        # nova/virt/xenapi/vm_utils.py:\n        xenstore-read: CommandFilter, xenstore-read, root\n\n        # nova/virt/libvirt/utils.py:\n        rbd: CommandFilter, rbd, root\n\n        # nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path\n        shred: CommandFilter, shred, root\n\n        # nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control..\n        cp: CommandFilter, cp, root\n\n        # nova/virt/xenapi/vm_utils.py:\n        sync: CommandFilter, sync, root\n\n        # nova/virt/libvirt/imagebackend.py:\n        ploop: RegExpFilter, ploop, root, ploop, restore-descriptor, .*\n        prl_disk_tool: RegExpFilter, prl_disk_tool, root, prl_disk_tool, resize, --size, .*M$, --resize_partition, --hdd, .*\n\n        # nova/virt/libvirt/utils.py: 'xend', 'status'\n        xend: CommandFilter, xend, root\n\n        # nova/virt/libvirt/utils.py:\n        touch: CommandFilter, touch, root\n\n        # nova/virt/libvirt/volume/vzstorage.py\n        pstorage-mount: CommandFilter, pstorage-mount, root\n    network:\n      pods:\n        - compute\n      content: |\n        # nova-rootwrap command filters for network nodes\n        # This file should be owned by (and only-writeable by) the root user\n\n        [Filters]\n        # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'\n        # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'\n        # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev\n        # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..\n        # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..\n        # nova/network/linux_net.py: 'ip', 
'addr', 'add', '169.254.169.254/32',..\n        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..\n        # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)\n        # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]\n        # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge\n        # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..\n        # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..\n        # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...\n        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..\n        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'\n        # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'\n        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..\n        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..\n        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'\n        # nova/network/linux_net.py: 'ip', 'route', 'add', ..\n        # nova/network/linux_net.py: 'ip', 'route', 'del', .\n        # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev\n        ip: CommandFilter, ip, root\n\n        # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...\n        # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...\n        # nova/network/linux_net.py: 'ovs-vsctl', ....\n        ovs-vsctl: CommandFilter, ovs-vsctl, root\n\n        # nova/network/linux_net.py: 'ovs-ofctl', ....\n        ovs-ofctl: CommandFilter, ovs-ofctl, root\n\n        # nova/virt/libvirt/vif.py: 'ivs-ctl', ...\n        # nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ...\n        # nova/network/linux_net.py: 'ivs-ctl', ....\n        ivs-ctl: CommandFilter, ivs-ctl, root\n\n        # nova/virt/libvirt/vif.py: 'ifc_ctl', ...\n        ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root\n\n        # nova/network/linux_net.py: 
'ebtables', '-D' ...\n        # nova/network/linux_net.py: 'ebtables', '-I' ...\n        ebtables: CommandFilter, ebtables, root\n        ebtables_usr: CommandFilter, ebtables, root\n\n        # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...\n        iptables-save: CommandFilter, iptables-save, root\n        ip6tables-save: CommandFilter, ip6tables-save, root\n\n        # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)\n        iptables-restore: CommandFilter, iptables-restore, root\n        ip6tables-restore: CommandFilter, ip6tables-restore, root\n\n        # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...\n        # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..\n        arping: CommandFilter, arping, root\n\n        # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address\n        dhcp_release: CommandFilter, dhcp_release, root\n\n        # nova/network/linux_net.py: 'kill', '-9', pid\n        # nova/network/linux_net.py: 'kill', '-HUP', pid\n        kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP\n\n        # nova/network/linux_net.py: 'kill', pid\n        kill_radvd: KillFilter, root, /usr/sbin/radvd\n\n        # nova/network/linux_net.py: dnsmasq call\n        dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq\n\n        # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..\n        radvd: CommandFilter, radvd, root\n\n        # nova/network/linux_net.py: 'brctl', 'addbr', bridge\n        # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0\n        # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'\n        # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface\n        brctl: CommandFilter, brctl, root\n\n        # nova/network/linux_net.py: 'sysctl', ....\n        sysctl: CommandFilter, sysctl, root\n\n        # nova/network/linux_net.py: 'conntrack'\n        conntrack: CommandFilter, conntrack, 
root\n\n        # nova/network/linux_net.py: 'fp-vdev'\n        fp-vdev: CommandFilter, fp-vdev, root\n  nova_ironic:\n    DEFAULT:\n      scheduler_host_manager: ironic_host_manager\n      compute_driver: ironic.IronicDriver\n      ram_allocation_ratio: 1.0\n      cpu_allocation_ratio: 1.0\n      reserved_host_memory_mb: 0\n  libvirt:\n    address_search_enabled: true\n    # When \"address_search_enabled\", get the IP address to be used as the target for live migration\n    # traffic using interface name.\n    # If this option is set to None, the hostname of the migration target compute node will be used.\n    live_migration_interface: null\n    # or set cidr\n    live_migration_network_cidr: 0/0\n  hypervisor:\n    address_search_enabled: true\n    # my_ip can be set automatically through this interface name.\n    host_interface: null\n    # If host_interface is null there is a fallback mechanism to search\n    # for interface with routing using host network cidr.\n    host_network_cidr: 0/0\n  # This list is the keys to exclude from the config file ingested by nova-compute\n  nova_compute_redactions:\n    - database\n    - api_database\n    - cell0_database\n  nova:\n    DEFAULT:\n      log_config_append: /etc/nova/logging.conf\n      default_ephemeral_format: ext4\n      ram_allocation_ratio: 1.0\n      disk_allocation_ratio: 1.0\n      cpu_allocation_ratio: 3.0\n      state_path: /var/lib/nova\n      osapi_compute_listen: 0.0.0.0\n      # NOTE(portdirect): the bind port should not be defined, and is manipulated\n      # via the endpoints section.\n      osapi_compute_listen_port: null\n      osapi_compute_workers: 1\n      metadata_workers: 1\n      compute_driver: libvirt.LibvirtDriver\n      my_ip: 0.0.0.0\n      instance_usage_audit: True\n      instance_usage_audit_period: hour\n      resume_guests_state_on_host_boot: True\n    vnc:\n      auth_schemes: none\n      novncproxy_host: 0.0.0.0\n      server_listen: 0.0.0.0\n      # This would be set by each 
compute nodes's ip\n      # server_proxyclient_address: 127.0.0.1\n    spice:\n      html5proxy_host: 0.0.0.0\n      server_listen: 0.0.0.0\n      # This would be set by each compute nodes's ip\n      # server_proxyclient_address: 127.0.0.1\n    serial_console:\n      serialproxy_host: 0.0.0.0\n      # This would be set by each compute nodes's ip\n      # proxyclient_address: 127.0.0.1\n    conductor:\n      workers: 1\n    scheduler:\n      max_attempts: 10\n      discover_hosts_in_cells_interval: -1\n      workers: 1\n    oslo_policy:\n      policy_file: /etc/nova/policy.yaml\n    oslo_concurrency:\n      lock_path: /var/lock\n    oslo_middleware:\n      enable_proxy_headers_parsing: true\n    glance:\n      num_retries: 3\n    ironic:\n      api_endpoint: null\n      auth_url: null\n    neutron:\n      metadata_proxy_shared_secret: \"password\"\n      service_metadata_proxy: True\n      auth_type: password\n      auth_version: v3\n    cinder:\n      auth_type: password\n      catalog_info: volumev3::internalURL\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    api_database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    cell0_database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. 
when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    keystone_authtoken:\n      service_token_roles: service\n      service_token_roles_required: true\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n      service_type: compute\n    notifications:\n      notify_on_state_change: vm_and_task_state\n    service_user:\n      auth_type: password\n      send_service_user_token: true\n    libvirt:\n      connection_uri: \"qemu+unix:///system?socket=/run/libvirt/libvirt-sock\"\n      images_type: qcow2\n      images_rbd_pool: vms\n      images_rbd_ceph_conf: /etc/ceph/ceph.conf\n      rbd_user: cinder\n      rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337\n      disk_cachemodes: \"network=writeback\"\n      hw_disk_discard: unmap\n    upgrade_levels:\n      compute: auto\n    cache:\n      enabled: true\n      backend: dogpile.cache.memcached\n    wsgi:\n      api_paste_config: /etc/nova/api-paste.ini\n    oslo_messaging_notifications:\n      driver: messagingv2\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: true\n    placement:\n      auth_type: password\n      auth_version: v3\n  logging:\n    loggers:\n      keys:\n        - root\n        - nova\n        - os.brick\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_nova:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: nova\n    logger_os.brick:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: os.brick\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: 
stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n  rabbitmq:\n    # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones\n    policies:\n      - vhost: \"nova\"\n        name: \"ha_ttl_nova\"\n        definition:\n          # mirror messges to other nodes in rmq cluster\n          ha-mode: \"all\"\n          ha-sync-mode: \"automatic\"\n          # 70s\n          message-ttl: 70000\n        priority: 0\n        apply-to: all\n        pattern: '^(?!(amq\\.|reply_)).*'\n  enable_iscsi: false\n  archive_deleted_rows:\n    purge_deleted_rows: false\n    until_completion: true\n    all_cells: false\n    max_rows:\n      enabled: False\n      rows: 1000\n    before:\n      enabled: false\n      date: 'nil'\n  nova_api_uwsgi:\n    uwsgi:\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      procname-prefix-spaced: \"nova-api:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      wsgi-file: /var/lib/openstack/bin/nova-api-wsgi\n      stats: 
0.0.0.0:1717\n      stats-http: true\n  nova_metadata_uwsgi:\n    uwsgi:\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      procname-prefix-spaced: \"nova-metadata:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      wsgi-file: /var/lib/openstack/bin/nova-metadata-wsgi\n      stats: 0.0.0.0:1717\n      stats-http: true\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: nova-keystone-admin\n    nova: nova-keystone-user\n    neutron: nova-keystone-neutron\n    placement: nova-keystone-placement\n    cinder: nova-keystone-cinder\n    ironic: nova-keystone-ironic\n    service: nova-keystone-service\n    test: nova-keystone-test\n  oslo_db:\n    admin: nova-db-admin\n    nova: nova-db-user\n  oslo_db_api:\n    admin: nova-db-api-admin\n    nova: nova-db-api-user\n  oslo_db_cell0:\n    admin: nova-db-cell0-admin\n    nova: nova-db-cell0-user\n  oslo_messaging:\n    admin: nova-rabbitmq-admin\n    nova: nova-rabbitmq-user\n  tls:\n    compute:\n      osapi:\n        public: nova-tls-public\n        internal: nova-tls-api\n    compute_novnc_proxy:\n      novncproxy:\n        public: nova-novncproxy-tls-public\n        internal: nova-novncproxy-tls-proxy\n      vencrypt:\n        internal: nova-novncproxy-vencrypt\n    compute_metadata:\n      metadata:\n        public: metadata-tls-public\n        internal: metadata-tls-metadata\n    compute_spice_proxy:\n      spiceproxy:\n        public: nova-spiceproxy-tls-public\n        internal: nova-spiceproxy-tls-proxy\n    compute_serial_proxy:\n      serialproxy:\n        public: nova-serialproxy-tls-public\n        internal: nova-serialproxy-tls-proxy\n  
oci_image_registry:\n    nova: nova-oci-image-registry\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      nova:\n        username: nova\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      nova:\n        username: nova\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /nova\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_db_api:\n    auth:\n      admin:\n        username: root\n        password: password\n      nova:\n        username: nova\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /nova_api\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_db_cell0:\n    auth:\n      admin:\n        username: root\n        password: password\n      nova:\n        username: nova\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /nova_cell0\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n       
 secret:\n          tls:\n            internal: rabbitmq-tls-direct\n      nova:\n        username: nova\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /nova\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      nova:\n        role: admin,service\n        region_name: RegionOne\n        username: nova\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      service:\n        role: admin,service\n        region_name: RegionOne\n        username: nova_service_user\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      # NOTE(portdirect): the neutron user is not managed by the nova chart\n      # these values should match those set in the neutron chart.\n      neutron:\n        role: admin,service\n        region_name: RegionOne\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n        username: nova_neutron\n     
   password: password\n      # NOTE(portdirect): the ironic user is not managed by the nova chart\n      # these values should match those set in the ironic chart.\n      ironic:\n        role: admin,service\n        auth_type: password\n        auth_version: v3\n        region_name: RegionOne\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n        username: nova_ironic\n        password: password\n      placement:\n        role: admin,service\n        region_name: RegionOne\n        username: nova_placement\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      cinder:\n        role: admin,service\n        region_name: RegionOne\n        username: nova_cinder\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      test:\n        role: admin\n        region_name: RegionOne\n        username: nova-test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  image:\n    name: glance\n    hosts:\n      default: glance-api\n      public: glance\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      api:\n        default: 9292\n        public: 80\n  volumev3:\n    name: cinderv3\n    hosts:\n      default: cinder-api\n      public: cinder\n    host_fqdn_override:\n      default: null\n    path:\n      default: '/v3'\n      healthcheck: /healthcheck\n    scheme:\n      default: http\n    port:\n      api:\n        default: 8776\n        public: 80\n  compute:\n  
  name: nova\n    hosts:\n      default: nova-api\n      public: nova\n    host_fqdn_override:\n      default: null\n      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: \"/v2.1/\"\n    scheme:\n      default: 'http'\n      service: 'http'\n    port:\n      api:\n        default: 8774\n        public: 80\n        service: 8774\n      novncproxy:\n        default: 6080\n  compute_metadata:\n    name: nova\n    ip:\n      # IF blank, set clusterIP and metadata_host dynamically\n      ingress: null\n    hosts:\n      default: nova-metadata\n      public: metadata\n    host_fqdn_override:\n      default: null\n    path:\n      default: /\n    scheme:\n      default: 'http'\n    port:\n      metadata:\n        default: 8775\n        public: 80\n  compute_novnc_proxy:\n    name: nova\n    hosts:\n      default: nova-novncproxy\n      public: novncproxy\n    host_fqdn_override:\n      default: null\n      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: /vnc_auto.html\n    scheme:\n      default: 'http'\n    port:\n      novnc_proxy:\n        default: 6080\n        public: 80\n  # This endpoint is only to allow configuring the cert used specifically for\n  # vencrypt.  
Specifically, the same CA/issuer needs to be used to sign both\n  # this cert, and the libvirt/qemu certs.\n  compute_novnc_vencrypt:\n    hosts:\n      default: nova-novncproxy\n    host_fqdn_override:\n      default:\n        commonName: nova-novncproxy\n        usages:\n          - client auth\n  compute_serial_proxy:\n    name: nova\n    hosts:\n      default: nova-serialproxy\n      public: serialproxy\n    host_fqdn_override:\n      default: null\n    scheme:\n      default: 'ws'\n    path:\n      default: /serial_auto.html\n    port:\n      serial_proxy:\n        default: 6083\n        public: 80\n  compute_spice_proxy:\n    name: nova\n    hosts:\n      default: nova-spiceproxy\n      public: spiceproxy\n    host_fqdn_override:\n      default: null\n    path:\n      default: /spice_auto.html\n    scheme:\n      default: 'http'\n    port:\n      spice_proxy:\n        default: 6082\n        public: 80\n  placement:\n    name: placement\n    hosts:\n      default: placement-api\n      public: placement\n    host_fqdn_override:\n      default: null\n    path:\n      default: /\n    scheme:\n      default: 'http'\n      service: 'http'\n    port:\n      api:\n        default: 8778\n        public: 80\n        service: 8778\n  network:\n    name: neutron\n    hosts:\n      default: neutron-server\n      public: neutron\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 9696\n        public: 80\n  baremetal:\n    name: ironic\n    hosts:\n      default: ironic-api\n      public: ironic\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      api:\n        default: 6385\n        public: 80\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    
port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress\n  # They are using to enable the Egress K8s network policy.\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns:\n        default: 53\n        protocol: UDP\n  ingress:\n    namespace: null\n    name: ingress\n    hosts:\n      default: ingress\n    port:\n      ingress:\n        default: 80\n\npod:\n  probes:\n    rpc_timeout: 60\n    rpc_retries: 2\n    compute:\n      default:\n        liveness:\n          enabled: True\n          params:\n            periodSeconds: 90\n            timeoutSeconds: 70\n        readiness:\n          enabled: True\n          params:\n            periodSeconds: 90\n            timeoutSeconds: 70\n        startup:\n          enabled: True\n          params:\n            failureThreshold: 120\n            periodSeconds: 10\n            successThreshold: 1\n            timeoutSeconds: 70\n    api-metadata:\n      default:\n        liveness:\n          enabled: True\n          params:\n            initialDelaySeconds: 5\n            periodSeconds: 10\n            timeoutSeconds: 5\n        readiness:\n          enabled: True\n          params:\n            initialDelaySeconds: 5\n            periodSeconds: 10\n            timeoutSeconds: 5\n    api-osapi:\n      default:\n        liveness:\n          enabled: True\n          params:\n            initialDelaySeconds: 5\n            periodSeconds: 10\n            timeoutSeconds: 5\n        readiness:\n          enabled: True\n          params:\n            initialDelaySeconds: 5\n            periodSeconds: 10\n            timeoutSeconds: 5\n    conductor:\n      default:\n        liveness:\n          enabled: True\n          params:\n            
initialDelaySeconds: 120\n            periodSeconds: 90\n            timeoutSeconds: 70\n        readiness:\n          enabled: True\n          params:\n            initialDelaySeconds: 80\n            periodSeconds: 90\n            timeoutSeconds: 70\n    novncproxy:\n      default:\n        liveness:\n          enabled: True\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 60\n            timeoutSeconds: 15\n        readiness:\n          enabled: True\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 60\n            timeoutSeconds: 15\n    scheduler:\n      default:\n        liveness:\n          enabled: True\n          params:\n            initialDelaySeconds: 120\n            periodSeconds: 90\n            timeoutSeconds: 70\n        readiness:\n          enabled: True\n          params:\n            initialDelaySeconds: 80\n            periodSeconds: 90\n            timeoutSeconds: 70\n    serialproxy:\n      default:\n        liveness:\n          enabled: True\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 60\n            timeoutSeconds: 15\n        readiness:\n          enabled: True\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 60\n            timeoutSeconds: 15\n    compute-spice-proxy:\n      default:\n        liveness:\n          enabled: True\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 60\n            timeoutSeconds: 15\n        readiness:\n          enabled: True\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 60\n            timeoutSeconds: 15\n  security_context:\n    nova:\n      pod:\n        runAsUser: 42424\n      container:\n        nova_compute_init:\n          readOnlyRootFilesystem: true\n          runAsUser: 0\n        ceph_perms:\n          readOnlyRootFilesystem: true\n          runAsUser: 0\n        
nova_compute_vnc_init:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_compute_serial_init:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_compute_spice_init:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_compute:\n          readOnlyRootFilesystem: true\n          privileged: true\n        nova_compute_ssh:\n          privileged: true\n          runAsUser: 0\n        nova_compute_ssh_init:\n          runAsUser: 0\n        nova_api_metadata_init:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_api:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_osapi:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_conductor:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_novncproxy_init:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_novncproxy_init_assests:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_novncproxy:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_scheduler:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_serialproxy_init:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_serialproxy:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_spiceproxy_init:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_spiceproxy_init_assets:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_spiceproxy:\n          readOnlyRootFilesystem: true\n          
allowPrivilegeEscalation: false\n    bootstrap:\n      pod:\n        runAsUser: 42424\n      container:\n        nova_wait_for_computes_init:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        bootstrap:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    nova_cell_setup:\n      pod:\n        runAsUser: 42424\n      container:\n        nova_wait_for_computes_init:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_cell_setup_init:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_cell_setup:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    archive_deleted_rows:\n      pod:\n        runAsUser: 42424\n      container:\n        nova_archive_deleted_rows_init:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        nova_archive_deleted_rows:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    cell_setup:\n      pod:\n        runAsUser: 42424\n      container:\n        nova_cell_setup:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    service_cleaner:\n      pod:\n        runAsUser: 42424\n      container:\n        nova_service_cleaner:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  use_fqdn:\n    # NOTE: If the option \"host\" is not specified in nova.conf, the host name\n    # shown in the hypervisor host is defaulted to the short name of the host.\n    # Setting the option here to true will cause use $(hostname --fqdn) as the\n    # host name by default. If the short name is desired $(hostname --short),\n    # set the option to false. 
Specifying a host in the nova.conf via the conf:\n    # section will supersede the value of this option.\n    compute: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    nova:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  # -- This allows users to add Kubernetes Projected Volumes to be mounted at /etc/nova/nova.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/job\n  ## https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    nova_compute: []\n    nova_compute_ironic: []\n    nova_api_metadata: []\n    nova_api_osapi: []\n    nova_conductor: []\n    nova_scheduler: []\n    nova_bootstrap: []\n    nova_tests: []\n    nova_novncproxy: []\n    nova_serialproxy: []\n    nova_spiceproxy: []\n    nova_db_sync: []\n    nova_archive_deleted_rows: []\n    nova_service_cleaner: []\n    nova_cell_setup: []\n  mounts:\n    nova_compute:\n      init_container: null\n      nova_compute:\n        volumeMounts:\n        volumes:\n    nova_compute_ironic:\n      init_container: null\n      nova_compute_ironic:\n        volumeMounts:\n        volumes:\n    nova_api_metadata:\n      init_container: null\n      nova_api_metadata:\n        volumeMounts:\n        volumes:\n    nova_api_osapi:\n      init_container: null\n      nova_api_osapi:\n        volumeMounts:\n        volumes:\n    nova_conductor:\n      init_container: null\n      nova_conductor:\n        volumeMounts:\n        volumes:\n    nova_scheduler:\n      init_container: null\n      nova_scheduler:\n        volumeMounts:\n        volumes:\n    nova_bootstrap:\n      
init_container: null\n      nova_bootstrap:\n        volumeMounts:\n        volumes:\n    nova_tests:\n      init_container: null\n      nova_tests:\n        volumeMounts:\n        volumes:\n    nova_novncproxy:\n      init_novncproxy: null\n      nova_novncproxy:\n        volumeMounts:\n        volumes:\n    nova_serialproxy:\n      init_serialproxy: null\n      nova_serialproxy:\n        volumeMounts:\n        volumes:\n    nova_spiceproxy:\n      init_spiceproxy: null\n      nova_spiceproxy:\n        volumeMounts:\n        volumes:\n    nova_db_sync:\n      nova_db_sync:\n        volumeMounts:\n        volumes:\n  useHostNetwork:\n    novncproxy: true\n  replicas:\n    api_metadata: 1\n    compute_ironic: 1\n    osapi: 1\n    conductor: 1\n    scheduler: 1\n    novncproxy: 1\n    serialproxy: 1\n    spiceproxy: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        compute:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n    disruption_budget:\n      metadata:\n        min_available: 0\n      osapi:\n        min_available: 0\n    termination_grace_period:\n      metadata:\n        timeout: 30\n      osapi:\n        timeout: 30\n  resources:\n    enabled: false\n    compute:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    compute_ironic:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    api_metadata:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      
limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    conductor:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    scheduler:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    ssh:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    novncproxy:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    serialproxy:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    spiceproxy:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      storage_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      archive_deleted_rows:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        
requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      cell_setup:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      service_cleaner:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nnetwork_policy:\n  nova:\n    # TODO(lamt): Need to tighten this ingress for security.\n    ingress:\n      - {}\n    egress:\n      - {}\n\nhealth_probe:\n  logging:\n    level: ERROR\n\ntls:\n  identity: false\n  oslo_messaging: false\n  oslo_db: false\n\nmanifests:\n  certificates: false\n  compute_uuid_self_provisioning: true\n  configmap_bin: true\n  configmap_etc: true\n  cron_job_cell_setup: true\n  cron_job_service_cleaner: true\n  cron_job_archive_deleted_rows: false\n  daemonset_compute: true\n  deployment_api_metadata: true\n  deployment_api_osapi: true\n  deployment_conductor: true\n  deployment_novncproxy: true\n  deployment_serialproxy: true\n  deployment_spiceproxy: true\n  
deployment_scheduler: true\n  # NOTE: StatefulSets provide stable pod hostnames (e.g., nova-conductor-0, nova-conductor-1)\n  # which are used as service host names in `openstack compute service list`.\n  # When enabled, the corresponding deployment_* manifest is automatically disabled.\n  # This ensures service names remain stable across pod restarts.\n  statefulset_conductor: false\n  statefulset_scheduler: false\n  ingress_metadata: true\n  ingress_novncproxy: true\n  ingress_serialproxy: true\n  ingress_spiceproxy: true\n  ingress_osapi: true\n  job_bootstrap: true\n  job_storage_init: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_image_repo_sync: true\n  job_rabbit_init: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_cell_setup: true\n  pdb_metadata: true\n  pdb_osapi: true\n  pod_rally_test: true\n  network_policy: false\n  secret_db_api: true\n  secret_db_cell0: true\n  secret_db: true\n  secret_ingress_tls: true\n  secret_keystone: true\n  secret_ks_etc: true\n  secret_rabbitmq: true\n  secret_registry: true\n  secret_ssh: true\n  service_ingress_metadata: true\n  service_ingress_novncproxy: true\n  service_ingress_serialproxy: true\n  service_ingress_spiceproxy: true\n  service_ingress_osapi: true\n  service_metadata: true\n  service_novncproxy: true\n  service_serialproxy: true\n  service_spiceproxy: true\n  service_osapi: true\n  statefulset_compute_ironic: false\n\n# List of compute hosts and its respective uuids\n# Items should be in the following format\n# - name: compute-node-hostname\n#   uuid: <compute node's UUID>\nhosts_uuids: []\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: 
\"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "octavia/Chart.yaml",
    "content": "# Copyright 2019 Samsung Electronics Co., Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Octavia\nname: octavia\nversion: 2025.2.0\nhome: https://docs.openstack.org/octavia/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Octavia/OpenStack_Project_Octavia_vertical.png\nsources:\n  - https://opendev.org/openstack/octavia\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "octavia/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "octavia/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\noctavia-db-manage upgrade head\n\noctavia-db-manage upgrade_persistence\n"
  },
  {
    "path": "octavia/templates/bin/_octavia-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec uwsgi --ini /etc/octavia/octavia-api-uwsgi.ini\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "octavia/templates/bin/_octavia-driver-agent.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2024 Vexxhost Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec octavia-driver-agent \\\n        --config-file /etc/octavia/octavia.conf \\\n        --config-dir /etc/octavia/octavia.conf.d\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "octavia/templates/bin/_octavia-health-manager-get-port.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nHOSTNAME=$(hostname -s)\nPORTNAME=octavia-health-manager-port-$HOSTNAME\n\nHM_PORT_ID=$(openstack port show $PORTNAME -c id -f value)\nHM_PORT_MAC=$(openstack port show $PORTNAME -c mac_address -f value)\n\necho $HM_PORT_ID > /tmp/pod-shared/HM_PORT_ID\necho $HM_PORT_MAC > /tmp/pod-shared/HM_PORT_MAC\n"
  },
  {
    "path": "octavia/templates/bin/_octavia-health-manager-nic-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nHM_PORT_ID=$(cat /tmp/pod-shared/HM_PORT_ID)\nHM_PORT_MAC=$(cat /tmp/pod-shared/HM_PORT_MAC)\n\novs-vsctl --no-wait show\n\novs-vsctl --may-exist add-port br-int o-hm0 \\\n        -- set Interface o-hm0 type=internal \\\n        -- set Interface o-hm0 external-ids:iface-status=active \\\n        -- set Interface o-hm0 external-ids:attached-mac=$HM_PORT_MAC \\\n        -- set Interface o-hm0 external-ids:iface-id=$HM_PORT_ID \\\n        -- set Interface o-hm0 external-ids:skip_cleanup=true\n\nip link set dev o-hm0 address $HM_PORT_MAC\n\niptables -I INPUT -i o-hm0 -p udp --dport {{ .Values.conf.octavia.health_manager.bind_port }} -j ACCEPT\n"
  },
  {
    "path": "octavia/templates/bin/_octavia-health-manager.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  cat > /tmp/dhclient.conf <<EOF\nrequest subnet-mask,broadcast-address,interface-mtu;\ndo-forward-updates false;\nEOF\n\n  dhclient -v o-hm0 -cf /tmp/dhclient.conf\n\n  exec octavia-health-manager \\\n        --config-file /etc/octavia/octavia.conf \\\n        --config-dir /etc/octavia/octavia.conf.d\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "octavia/templates/bin/_octavia-housekeeping.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec octavia-housekeeping \\\n        --config-file /etc/octavia/octavia.conf \\\n        --config-dir /etc/octavia/octavia.conf.d\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "octavia/templates/bin/_octavia-worker-get-port.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nHOSTNAME=$(hostname -s)\nPORTNAME=octavia-worker-port-$HOSTNAME\n\nHM_PORT_ID=$(openstack port show $PORTNAME -c id -f value)\nHM_PORT_MAC=$(openstack port show $PORTNAME -c mac_address -f value)\n\necho $HM_PORT_ID > /tmp/pod-shared/HM_PORT_ID\necho $HM_PORT_MAC > /tmp/pod-shared/HM_PORT_MAC\n"
  },
  {
    "path": "octavia/templates/bin/_octavia-worker-nic-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nHM_PORT_ID=$(cat /tmp/pod-shared/HM_PORT_ID)\nHM_PORT_MAC=$(cat /tmp/pod-shared/HM_PORT_MAC)\n\novs-vsctl --no-wait show\n\novs-vsctl --may-exist add-port br-int o-w0 \\\n        -- set Interface o-w0 type=internal \\\n        -- set Interface o-w0 external-ids:iface-status=active \\\n        -- set Interface o-w0 external-ids:attached-mac=$HM_PORT_MAC \\\n        -- set Interface o-w0 external-ids:iface-id=$HM_PORT_ID \\\n        -- set Interface o-w0 external-ids:skip_cleanup=true\n\nip link set dev o-w0 address $HM_PORT_MAC\n"
  },
  {
    "path": "octavia/templates/bin/_octavia-worker.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  cat > /tmp/dhclient.conf <<EOF\nrequest subnet-mask,broadcast-address,interface-mtu;\ndo-forward-updates false;\nEOF\n\n  dhclient -v o-w0 -cf /tmp/dhclient.conf\n\n  exec octavia-worker \\\n        --config-file /etc/octavia/octavia.conf \\\n        --config-dir /etc/octavia/octavia.conf.d\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "octavia/templates/configmap-bin.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $rallyTests := .Values.conf.rally_tests }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: octavia-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  rally-test.sh: |\n{{ tuple $rallyTests | include \"helm-toolkit.scripts.rally_test\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  octavia-api.sh: |\n{{ tuple \"bin/_octavia-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  octavia-driver-agent.sh: |\n{{ tuple \"bin/_octavia-driver-agent.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  octavia-health-manager.sh: |\n{{ tuple \"bin/_octavia-health-manager.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  octavia-health-manager-nic-init.sh: |\n{{ tuple \"bin/_octavia-health-manager-nic-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  octavia-health-manager-get-port.sh: |\n{{ tuple \"bin/_octavia-health-manager-get-port.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  octavia-housekeeping.sh: |\n{{ tuple \"bin/_octavia-housekeeping.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  octavia-worker.sh: |\n{{ tuple \"bin/_octavia-worker.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  octavia-worker-nic-init.sh: |\n{{ tuple \"bin/_octavia-worker-nic-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  octavia-worker-get-port.sh: |\n{{ tuple \"bin/_octavia-worker-get-port.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/configmap-etc.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"octavia.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if empty .Values.conf.octavia.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.octavia.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.octavia.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.octavia.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.octavia.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.octavia.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.octavia.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.octavia.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.octavia.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.octavia.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.octavia.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.octavia.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.octavia.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.octavia.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.octavia.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.octavia.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.octavia.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.octavia.keystone_authtoken \"username\" .Values.endpoints.identity.auth.octavia.username -}}\n{{- end -}}\n{{- if empty .Values.conf.octavia.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.octavia.keystone_authtoken \"password\" .Values.endpoints.identity.auth.octavia.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.octavia.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.octavia.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.octavia.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.octavia.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if empty .Values.conf.octavia.service_auth.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.octavia.service_auth \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.octavia.service_auth.project_name -}}\n{{- $_ := set .Values.conf.octavia.service_auth \"project_name\" .Values.endpoints.identity.auth.admin.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.octavia.service_auth.project_domain_name -}}\n{{- $_ := set .Values.conf.octavia.service_auth \"project_domain_name\" .Values.endpoints.identity.auth.admin.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.octavia.service_auth.user_domain_name -}}\n{{- $_ := set .Values.conf.octavia.service_auth \"user_domain_name\" .Values.endpoints.identity.auth.admin.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.octavia.service_auth.username -}}\n{{- $_ := set .Values.conf.octavia.service_auth \"username\" .Values.endpoints.identity.auth.admin.username -}}\n{{- end -}}\n{{- if empty .Values.conf.octavia.service_auth.password -}}\n{{- $_ := set .Values.conf.octavia.service_auth \"password\" .Values.endpoints.identity.auth.admin.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.octavia.service_auth.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.octavia.service_auth \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.octavia.service_auth.memcache_secret_key -}}\n{{- $_ := set .Values.conf.octavia.service_auth \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.octavia.database.connection)) (empty .Values.conf.octavia.database.connection) -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"octavia\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\"| set .Values.conf.octavia.database \"connection\" -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.octavia.task_flow.persistence_connection)) (empty .Values.conf.octavia.task_flow.persistence_connection) -}}\n{{- $_ := tuple \"oslo_db_persistence\" \"internal\" \"octavia\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\"| set .Values.conf.octavia.task_flow \"persistence_connection\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.octavia.task_flow.jobboard_backend_hosts -}}\n{{- $_ := tuple \"valkey\" \"internal\" . | include \"helm-toolkit.endpoints.endpoint_host_lookup\" | set .Values.conf.octavia.task_flow \"jobboard_backend_hosts\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.octavia.task_flow.jobboard_backend_port -}}\n{{- $_ := tuple \"valkey\" \"sentinel\" \"server\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.octavia.task_flow \"jobboard_backend_port\" -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.octavia.task_flow.jobboard_backend_password) (not (empty .Values.endpoints.valkey.password) ) -}}\n{{- $_ := set .Values.conf.octavia.task_flow \"jobboard_backend_password\" .Values.endpoints.valkey.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.octavia.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"octavia\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.octavia.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty (index .Values.conf.octavia_api_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"load_balancer\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.octavia_api_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .Release.Name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set 
.Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $configMapName }}\ntype: Opaque\ndata:\n  octavia.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.octavia | b64enc }}\n  octavia-api-uwsgi.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.octavia_api_uwsgi | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- list \"octavia-etc\" . | include \"octavia.configmap.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/daemonset-health-manager.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"octavia.health_manager.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 3 }}\n{{- with $envAll }}\n\n{{- $mounts_octavia_health_manager := .Values.pod.mounts.octavia_health_manager.octavia_health_manager }}\n{{- $mounts_octavia_health_manager_init := .Values.pod.mounts.octavia_health_manager.init_container }}\n\n{{- $etcSources := .Values.pod.etcSources.octavia_health_manager }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: octavia-health-manager\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"octavia\" \"health_manager\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"octavia\" \"health_manager\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"health_manager\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"octavia\" \"health_manager\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"octavia_health_manager\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ tuple \"octavia_health_manager\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"octavia_health_manager\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      hostPID: true\n      nodeSelector:\n        {{ .Values.labels.health_manager.node_selector_key }}: {{ .Values.labels.health_manager.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"health_manager\" $mounts_octavia_health_manager_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: octavia-health-manager-get-port\n{{ tuple $envAll \"octavia_health_manager_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.health_manager | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" ( index $envAll.Values.secrets.identity \"admin\" ) }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          command:\n            - /tmp/octavia-health-manager-get-port.sh\n          volumeMounts:\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: octavia-bin\n              mountPath: /tmp/octavia-health-manager-get-port.sh\n              subPath: octavia-health-manager-get-port.sh\n              readOnly: true\n        - name: octavia-health-manager-nic-init\n{{ tuple $envAll \"openvswitch_vswitchd\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll 
$envAll.Values.pod.resources.health_manager | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"octavia_health_manager\" \"container\" \"octavia_health_manager_nic_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/octavia-health-manager-nic-init.sh\n          volumeMounts:\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: octavia-bin\n              mountPath: /tmp/octavia-health-manager-nic-init.sh\n              subPath: octavia-health-manager-nic-init.sh\n              readOnly: true\n            - name: run\n              mountPath: /run\n      containers:\n        - name: octavia-health-manager\n{{ tuple $envAll \"octavia_health_manager\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.health_manager | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"octavia_health_manager\" \"container\" \"octavia_health_manager\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/octavia-health-manager.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/octavia-health-manager.sh\n                  - stop\n          volumeMounts:\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.octavia.oslo_concurrency.lock_path }}\n            - name: pod-etc-octavia\n              mountPath: /etc/octavia\n            - name: octavia-bin\n              mountPath: /tmp/octavia-health-manager.sh\n              subPath: octavia-health-manager.sh\n              readOnly: true\n            - name: octavia-etc\n              mountPath: /etc/octavia/octavia.conf\n              subPath: 
octavia.conf\n              readOnly: true\n            - name: octavia-etc-snippets\n              mountPath: /etc/octavia/octavia.conf.d/\n              readOnly: true\n            {{- if .Values.conf.octavia.DEFAULT.log_config_append }}\n            - name: octavia-etc\n              mountPath: {{ .Values.conf.octavia.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.octavia.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{ if $mounts_octavia_health_manager.volumeMounts }}{{ toYaml $mounts_octavia_health_manager.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-octavia\n          emptyDir: {}\n        - name: octavia-bin\n          configMap:\n            name: octavia-bin\n            defaultMode: 0555\n        - name: octavia-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: octavia-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: pod-shared\n          emptyDir: {}\n        - name: run\n          hostPath:\n            path: /run\n{{ if $mounts_octavia_health_manager.volumes }}{{ toYaml $mounts_octavia_health_manager.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_health_manager }}\n{{- $envAll := . 
}}\n{{- $daemonset := \"health_manager\" }}\n{{- $configMapName := \"octavia-etc\" }}\n{{- $serviceAccountName := \"octavia-health-manager\" }}\n\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"health_manager\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n\n{{ tuple $envAll \"health_manager\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"octavia.health_manager.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"octavia.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/daemonset-worker.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"octavia.worker.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 3 }}\n{{- with $envAll }}\n\n{{- $mounts_octavia_worker := .Values.pod.mounts.octavia_worker.octavia_worker }}\n{{- $mounts_octavia_worker_init := .Values.pod.mounts.octavia_worker.init_container }}\n\n{{- $etcSources := .Values.pod.etcSources.octavia_worker }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: octavia-worker\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"octavia\" \"worker\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"octavia\" \"worker\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"worker\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"octavia\" \"worker\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"octavia_worker\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ tuple \"octavia_worker\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"octavia_worker\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      # hostPID: true\n      nodeSelector:\n        {{ .Values.labels.worker.node_selector_key }}: {{ .Values.labels.worker.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"worker\" $mounts_octavia_worker_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: octavia-worker-get-port\n{{ tuple $envAll \"octavia_worker_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.worker | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" ( index $envAll.Values.secrets.identity \"admin\" ) }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          command:\n            - /tmp/octavia-worker-get-port.sh\n          volumeMounts:\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: octavia-bin\n              mountPath: /tmp/octavia-worker-get-port.sh\n              subPath: octavia-worker-get-port.sh\n              readOnly: true\n        - name: octavia-worker-nic-init\n{{ tuple $envAll \"openvswitch_vswitchd\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.worker | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"octavia_worker\" \"container\" \"octavia_worker_nic_init\" | 
include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/octavia-worker-nic-init.sh\n          volumeMounts:\n            - name: pod-shared\n              mountPath: /tmp/pod-shared\n            - name: octavia-bin\n              mountPath: /tmp/octavia-worker-nic-init.sh\n              subPath: octavia-worker-nic-init.sh\n              readOnly: true\n            - name: run\n              mountPath: /run\n      containers:\n        - name: octavia-worker\n{{ tuple $envAll \"octavia_worker\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.worker | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"octavia_worker\" \"container\" \"octavia_worker\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/octavia-worker.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/octavia-worker.sh\n                  - stop\n          volumeMounts:\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.octavia.oslo_concurrency.lock_path }}\n            - name: pod-etc-octavia\n              mountPath: /etc/octavia\n            - name: octavia-bin\n              mountPath: /tmp/octavia-worker.sh\n              subPath: octavia-worker.sh\n              readOnly: true\n            - name: octavia-etc\n              mountPath: /etc/octavia/octavia.conf\n              subPath: octavia.conf\n              readOnly: true\n            - name: octavia-etc-snippets\n              mountPath: /etc/octavia/octavia.conf.d/\n              readOnly: true\n            {{- if .Values.conf.octavia.DEFAULT.log_config_append }}\n            - name: octavia-etc\n              mountPath: {{ 
.Values.conf.octavia.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.octavia.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{ if $mounts_octavia_worker.volumeMounts }}{{ toYaml $mounts_octavia_worker.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-octavia\n          emptyDir: {}\n        - name: octavia-bin\n          configMap:\n            name: octavia-bin\n            defaultMode: 0555\n        - name: octavia-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: octavia-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        - name: pod-shared\n          emptyDir: {}\n        - name: run\n          hostPath:\n            path: /run\n{{ if $mounts_octavia_worker.volumes }}{{ toYaml $mounts_octavia_worker.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset_worker }}\n{{- $envAll := . }}\n{{- $daemonset := \"worker\" }}\n{{- $configMapName := \"octavia-etc\" }}\n{{- $serviceAccountName := \"octavia-worker\" }}\n\n{{- $dependencyOpts := dict \"envAll\" $envAll \"dependencyMixinParam\" $envAll.Values.network.backend \"dependencyKey\" \"worker\" -}}\n{{- $_ := include \"helm-toolkit.utils.dependency_resolver\" $dependencyOpts | toString | fromYaml }}\n\n{{ tuple $envAll \"worker\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include \"octavia.worker.daemonset\" | toString | fromYaml }}\n{{- $configmap_yaml := \"octavia.configmap.etc\" }}\n{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include \"helm-toolkit.utils.daemonset_overrides\" }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/deployment-api.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n\n{{- define \"octaviaApiLivenessProbeTemplate\" }}\nhttpGet:\n  scheme: {{ tuple \"load_balancer\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: {{ tuple \"load_balancer\" \"healthcheck\" \"internal\" . | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" }}\n  port: {{ (splitList \":\" (index (index .Values.conf.octavia_api_uwsgi \"uwsgi\") \"http-socket\") | last) }}\n{{- end }}\n\n{{- define \"octaviaApiReadinessProbeTemplate\" }}\nhttpGet:\n  scheme: {{ tuple \"load_balancer\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: {{ tuple \"load_balancer\" \"healthcheck\" \"internal\" . | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" }}\n  port: {{ (splitList \":\" (index (index .Values.conf.octavia_api_uwsgi \"uwsgi\") \"http-socket\") | last) }}\n{{- end }}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . 
}}\n\n{{- $mounts_octavia_api := .Values.pod.mounts.octavia_api.octavia_api }}\n{{- $mounts_octavia_api_init := .Values.pod.mounts.octavia_api.init_container }}\n\n{{- $serviceAccountName := \"octavia-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.octavia_api }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: octavia-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"octavia\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"octavia\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"octavia\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"octavia_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ tuple \"octavia_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"octavia_api\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"octavia\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_octavia_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: octavia-api\n{{ tuple $envAll \"octavia_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"octavia_api\" \"container\" \"octavia_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/octavia-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/octavia-api.sh\n                  - stop\n          ports:\n            - name: o-api\n              containerPort: {{ (splitList \":\" (index (index .Values.conf.octavia_api_uwsgi \"uwsgi\") \"http-socket\") | last) }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"octavia-api\" \"type\" \"readiness\" \"probeTemplate\" (include \"octaviaApiReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"octavia-api\" \"type\" \"liveness\" \"probeTemplate\" (include \"octaviaApiLivenessProbeTemplate\" $envAll | fromYaml) | include 
\"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          volumeMounts:\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.octavia.oslo_concurrency.lock_path }}\n            - name: pod-etc-octavia\n              mountPath: /etc/octavia\n            - name: octavia-bin\n              mountPath: /tmp/octavia-api.sh\n              subPath: octavia-api.sh\n              readOnly: true\n            - name: octavia-etc\n              mountPath: /etc/octavia/octavia.conf\n              subPath: octavia.conf\n              readOnly: true\n            - name: octavia-etc-snippets\n              mountPath: /etc/octavia/octavia.conf.d/\n              readOnly: true\n            - name: octavia-etc\n              mountPath: /etc/octavia/octavia-api-uwsgi.ini\n              subPath: octavia-api-uwsgi.ini\n              readOnly: true\n            - name: octavia-etc\n              mountPath: /etc/octavia/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            {{- if .Values.conf.octavia.DEFAULT.log_config_append }}\n            - name: octavia-etc\n              mountPath: {{ .Values.conf.octavia.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.octavia.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{ if $mounts_octavia_api.volumeMounts }}{{ toYaml $mounts_octavia_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-octavia\n          emptyDir: {}\n        - name: octavia-bin\n          configMap:\n            name: octavia-bin\n            defaultMode: 0555\n        - name: octavia-etc\n          secret:\n            secretName: octavia-etc\n            defaultMode: 0444\n        - name: octavia-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end 
}}\n{{ if $mounts_octavia_api.volumes }}{{ toYaml $mounts_octavia_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/deployment-driver-agent.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n\n{{- define \"octaviaDriverAgentLivenessProbeTemplate\" }}\n{{- end }}\n\n{{- define \"octaviaDriverAgentReadinessProbeTemplate\" }}\n{{- end }}\n\n{{- if .Values.manifests.deployment_driver_agent }}\n{{- $envAll := . }}\n\n{{- $mounts_octavia_driver_agent := .Values.pod.mounts.octavia_driver_agent.octavia_driver_agent }}\n{{- $mounts_octavia_driver_agent_init := .Values.pod.mounts.octavia_driver_agent.init_container }}\n\n{{- $serviceAccountName := \"octavia-driver-agent\" }}\n{{ tuple $envAll \"driver_agent\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.octavia_driver_agent }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: octavia-driver-agent\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"octavia\" \"driver_agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.driver_agent }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"octavia\" \"driver_agent\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"octavia\" \"driver_agent\" | include 
\"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"octavia_driver_agent\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ tuple \"octavia_driver_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"octavia_driver_agent\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"octavia\" \"driver_agent\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.driver_agent.node_selector_key }}: {{ .Values.labels.driver_agent.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"driver_agent\" $mounts_octavia_driver_agent_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: octavia-driver-agent\n{{ tuple $envAll \"octavia_driver_agent\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.driver_agent | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"octavia_driver_agent\" \"container\" \"octavia_driver_agent\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/octavia-driver-agent.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/octavia-driver-agent.sh\n                  - stop\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - 
name: oslo-lock-path\n              mountPath: {{ .Values.conf.octavia.oslo_concurrency.lock_path }}\n            - name: pod-etc-octavia\n              mountPath: /etc/octavia\n            - name: octavia-bin\n              mountPath: /tmp/octavia-driver-agent.sh\n              subPath: octavia-driver-agent.sh\n              readOnly: true\n            - name: octavia-etc\n              mountPath: /etc/octavia/octavia.conf\n              subPath: octavia.conf\n              readOnly: true\n            - name: octavia-etc-snippets\n              mountPath: /etc/octavia/octavia.conf.d/\n              readOnly: true\n            {{- if .Values.conf.octavia.DEFAULT.log_config_append }}\n            - name: octavia-etc\n              mountPath: {{ .Values.conf.octavia.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.octavia.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: octavia-driver-agents\n              mountPath: /var/run/octavia\n            - name: run-openvswitch\n              mountPath: /var/run/ovn\n{{ if $mounts_octavia_driver_agent.volumeMounts }}{{ toYaml $mounts_octavia_driver_agent.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-octavia\n          emptyDir: {}\n        - name: run-openvswitch\n          hostPath:\n            path: /run/openvswitch\n            type: DirectoryOrCreate\n        - name: octavia-driver-agents\n          emptyDir: {}\n        - name: octavia-bin\n          configMap:\n            name: octavia-bin\n            defaultMode: 0555\n        - name: octavia-etc\n          secret:\n            secretName: octavia-etc\n            defaultMode: 0444\n        - name: octavia-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          
emptyDir: {}\n{{ end }}\n{{ if $mounts_octavia_driver_agent.volumes }}{{ toYaml $mounts_octavia_driver_agent.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/deployment-housekeeping.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_housekeeping }}\n{{- $envAll := . }}\n\n{{- $mounts_octavia_housekeeping := .Values.pod.mounts.octavia_housekeeping.octavia_housekeeping }}\n{{- $mounts_octavia_housekeeping_init := .Values.pod.mounts.octavia_housekeeping.init_container }}\n\n{{- $serviceAccountName := \"octavia-housekeeping\" }}\n{{ tuple $envAll \"housekeeping\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $etcSources := .Values.pod.etcSources.octavia_housekeeping }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: octavia-housekeeping\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"octavia\" \"housekeeping\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.housekeeping }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"octavia\" \"housekeeping\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"octavia\" \"housekeeping\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple 
\"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"octavia_housekeeping\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n{{ tuple \"octavia_housekeeping\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"octavia_housekeeping\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      affinity:\n{{ tuple $envAll \"octavia\" \"housekeeping\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.housekeeping.node_selector_key }}: {{ .Values.labels.housekeeping.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"housekeeping\" $mounts_octavia_housekeeping_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: octavia-housekeeping\n{{ tuple $envAll \"octavia_housekeeping\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.housekeeping | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"octavia_housekeeping\" \"container\" \"octavia_housekeeping\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/octavia-housekeeping.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/octavia-housekeeping.sh\n                  - stop\n          volumeMounts:\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.octavia.oslo_concurrency.lock_path }}\n            - 
name: pod-etc-octavia\n              mountPath: /etc/octavia\n            - name: octavia-bin\n              mountPath: /tmp/octavia-housekeeping.sh\n              subPath: octavia-housekeeping.sh\n              readOnly: true\n            - name: octavia-etc\n              mountPath: /etc/octavia/octavia.conf\n              subPath: octavia.conf\n              readOnly: true\n            - name: octavia-etc-snippets\n              mountPath: /etc/octavia/octavia.conf.d/\n              readOnly: true\n            {{- if .Values.conf.octavia.DEFAULT.log_config_append }}\n            - name: octavia-etc\n              mountPath: {{ .Values.conf.octavia.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.octavia.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n{{ if $mounts_octavia_housekeeping.volumeMounts }}{{ toYaml $mounts_octavia_housekeeping.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-octavia\n          emptyDir: {}\n        - name: octavia-bin\n          configMap:\n            name: octavia-bin\n            defaultMode: 0555\n        - name: octavia-etc\n          secret:\n            secretName: octavia-etc\n            defaultMode: 0444\n        - name: octavia-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n{{ if $mounts_octavia_housekeeping.volumes }}{{ toYaml $mounts_octavia_housekeeping.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "octavia/templates/ingress-api.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendServiceType\" \"load_balancer\" \"backendPort\" \"o-api\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/job-bootstrap.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.bootstrap\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"5\"\n{{- end }}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"octavia\" \"keystoneUser\" .Values.bootstrap.ks_user \"logConfigFile\" .Values.conf.octavia.DEFAULT.log_config_append \"jobAnnotations\" (include \"metadata.annotations.job.bootstrap\" . | fromYaml) -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/job-db-drop.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $serviceName := \"octavia\" -}}\n{{- $dbSvc := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"database\" \"configDbKey\" \"connection\" -}}\n{{- $dbPersist := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"task_flow\" \"configDbKey\" \"persistence_connection\" -}}\n{{- $dbsToDrop := list $dbSvc $dbPersist }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" $serviceName \"dbsToDrop\" $dbsToDrop -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/job-db-init.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $serviceName := \"octavia\" -}}\n{{- $dbSvc := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"database\" \"configDbKey\" \"connection\" -}}\n{{- $dbPersist := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"task_flow\" \"configDbKey\" \"persistence_connection\" -}}\n{{- $dbsToInit := list $dbSvc $dbPersist }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" $serviceName \"dbsToInit\" $dbsToInit -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/job-db-sync.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"octavia\" \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"octavia\" \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/job-ks-endpoint.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"octavia\" \"serviceTypes\" ( tuple \"load-balancer\" ) \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/job-ks-service.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"octavia\" \"serviceTypes\" ( tuple \"load-balancer\" ) \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/job-ks-user.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"octavia\" \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/job-rabbit-init.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"octavia\" \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/network_policy.yaml",
    "content": "# Copyright 2019 Samsung Electronics Co., Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"octavia\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "octavia/templates/pdb-api.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: octavia-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"octavia\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/secret-db-persistence.yaml",
    "content": "{{/*\nCopyright 2024 Vexxhost Inc.\nSPDX-License-Identifier: Apache-2.0\n*/}}\n\n{{- if .Values.manifests.secret_db_persistence }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"octavia\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db_persistence $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  DB_CONNECTION: {{ tuple \"oslo_db_persistence\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/secret-db.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"octavia\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  DB_CONNECTION: {{ tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"load_balancer\" ) }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/secret-keystone.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"octavia\" \"test\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"octavia\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass \"http\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/service-api.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"load_balancer\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: o-api\n      port: {{ tuple \"load_balancer\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"octavia\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "octavia/templates/service-ingress-api.yaml",
    "content": "{{/*\nCopyright 2019 Samsung Electronics Co., Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"load_balancer\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "octavia/values.yaml",
    "content": "# Copyright 2019 Samsung Electronics Co., Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for octavia.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  driver_agent:\n    node_selector_key: openstack-network-node\n    node_selector_value: enabled\n  worker:\n    node_selector_key: openstack-network-node\n    node_selector_value: enabled\n  housekeeping:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  health_manager:\n    node_selector_key: openstack-network-node\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    octavia_db_sync: quay.io/airshipit/octavia:2025.1-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    
dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n    octavia_api: quay.io/airshipit/octavia:2025.1-ubuntu_jammy\n    octavia_driver_agent: quay.io/airshipit/octavia:2025.1-ubuntu_jammy\n    octavia_worker: quay.io/airshipit/octavia:2025.1-ubuntu_jammy\n    octavia_worker_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    octavia_housekeeping: quay.io/airshipit/octavia:2025.1-ubuntu_jammy\n    octavia_health_manager: quay.io/airshipit/octavia:2025.1-ubuntu_jammy\n    octavia_health_manager_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    openvswitch_vswitchd: quay.io/airshipit/openvswitch:latest-ubuntu_jammy\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nbootstrap:\n  enabled: true\n  ks_user: admin\n  script: |\n    openstack role create --or-show load-balancer_admin\n    openstack role create --or-show load-balancer_observer\n    openstack role create --or-show load-balancer_global_observer\n    openstack role create --or-show load-balancer_quota_admin\n    openstack role create --or-show load-balancer_member\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30826\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - octavia-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - octavia-db-sync\n        - octavia-ks-user\n        - octavia-ks-endpoints\n        - octavia-rabbit-init\n      services:\n        - endpoint: internal\n          service: 
oslo_db\n        - endpoint: internal\n          service: oslo_db_persistence\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_cache\n        - endpoint: internal\n          service: network\n    driver_agent:\n      jobs:\n        - octavia-db-sync\n        - octavia-ks-user\n        - octavia-ks-endpoints\n        - octavia-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_db_persistence\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_cache\n        - endpoint: internal\n          service: network\n        - endpoint: internal\n          service: load_balancer\n    worker:\n      jobs:\n        - octavia-db-sync\n        - octavia-ks-user\n        - octavia-ks-endpoints\n        - octavia-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_db_persistence\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_cache\n        - endpoint: internal\n          service: network\n        - endpoint: internal\n          service: load_balancer\n    housekeeping:\n      jobs:\n        - octavia-db-sync\n        - octavia-ks-user\n        - octavia-ks-endpoints\n        - octavia-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_db_persistence\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_cache\n   
     - endpoint: internal\n          service: network\n        - endpoint: internal\n          service: load_balancer\n    health_manager:\n      jobs:\n        - octavia-db-sync\n        - octavia-ks-user\n        - octavia-ks-endpoints\n        - octavia-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_db_persistence\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n        - endpoint: internal\n          service: oslo_cache\n        - endpoint: internal\n          service: network\n        - endpoint: internal\n          service: load_balancer\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_db_persistence\n    db_sync:\n      jobs:\n        - octavia-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_db_persistence\n    ks_endpoints:\n      jobs:\n        - octavia-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n      - endpoint: internal\n        service: oslo_messaging\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nconf:\n  octavia:\n    DEFAULT:\n      log_config_append: /etc/octavia/logging.conf\n    ovn:\n      ovn_nb_connection: unix:/var/run/ovn/ovnnb_db.sock\n      ovn_sb_connection: unix:/var/run/ovn/ovnsb_db.sock\n    api_settings:\n      api_handler: queue_producer\n      bind_host: 0.0.0.0\n      healthcheck_enabled: true\n    database:\n      max_retries: -1\n      # -- Database 
connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    health_manager:\n      bind_port: 5555\n      bind_ip: 0.0.0.0\n      controller_ip_port_list: null\n      heartbeat_key: insecure\n    keystone_authtoken:\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n      service_type: load-balancer\n    certificates:\n      ca_private_key_passphrase: not-secure-passphrase\n      ca_private_key: /etc/octavia/certs/private/server_ca.key.pem\n      ca_certificate: /etc/octavia/certs/server_ca.cert.pem\n    haproxy_amphora:\n      server_ca: /etc/octavia/certs/server_ca-chain.cert.pem\n      client_cert: /etc/octavia/certs/private/client.cert-and-key.pem\n      base_path: /var/lib/octavia\n      base_cert_dir: /var/lib/octavia/certs\n    controller_worker:\n      amp_image_owner_id: null\n      amp_secgroup_list: null\n      amp_flavor_id: null\n      amp_boot_network_list: null\n      amp_ssh_key_name: octavia_ssh_key\n      amp_image_tag: amphora\n      network_driver: allowed_address_pairs_driver\n      compute_driver: compute_nova_driver\n      amphora_driver: amphora_haproxy_rest_driver\n      workers: 2\n      amp_active_retries: 100\n      amp_active_wait_sec: 2\n      loadbalancer_topology: SINGLE\n      client_ca: /etc/octavia/certs/client_ca.cert.pem\n    oslo_messaging:\n      topic: octavia_prov\n      rpc_thread_pool_size: 2\n    oslo_messaging_notifications:\n      driver: messagingv2\n    oslo_policy:\n      policy_file: /etc/octavia/policy.yaml\n    oslo_concurrency:\n      lock_path: /var/lock\n    house_keeping:\n      load_balancer_expiry_age: 3600\n      amphora_expiry_age: 3600\n    service_auth:\n      auth_type: password\n      cafile: \"\"\n      auth_version: 
v3\n      memcache_security_strategy: ENCRYPT\n    task_flow:\n      jobboard_enabled: true\n      # -- Taskflow persistence connection URI. When empty the URI is\n      ## auto-generated from endpoints.oslo_db_persistence. Set to null\n      ## to disable auto-generation.\n      persistence_connection: \"\"\n  policy: {}\n  logging:\n    loggers:\n      keys:\n        - root\n        - octavia\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: INFO\n      handlers: stdout\n    logger_octavia:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: octavia\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n    formatter_default:\n      format: \"%(message)s\"\n  rabbitmq:\n    # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones\n    policies:\n      - vhost: \"octavia\"\n        name: \"ha_ttl_octavia\"\n        definition:\n          # mirror messges to other nodes in rmq cluster\n          ha-mode: \"all\"\n          ha-sync-mode: \"automatic\"\n          # 70s\n          
message-ttl: 70000\n        priority: 0\n        apply-to: all\n        pattern: '(notifications)\\.'\n  octavia_api_uwsgi:\n    uwsgi:\n      processes: 4\n      http-socket: \"0.0.0.0:9876\"\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      procname-prefix-spaced: \"octavia-api:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      module: \"octavia.wsgi.api:application\"\n      stats: 0.0.0.0:1717\n      stats-http: true\n\nsecrets:\n  identity:\n    admin: octavia-keystone-admin\n    octavia: octavia-keystone-user\n    test: octavia-keystone-test\n  oslo_db:\n    admin: octavia-db-admin\n    octavia: octavia-db-user\n  oslo_db_persistence:\n    admin: octavia-persistence-db-admin\n    octavia: octavia-persistence-db-user\n  oslo_messaging:\n    admin: octavia-rabbitmq-admin\n    octavia: octavia-rabbitmq-user\n  tls:\n    load_balancer:\n      api:\n        public: octavia-tls-public\n  oci_image_registry:\n    octavia: octavia-oci-image-registry\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      octavia:\n        username: octavia\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        
region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      octavia:\n        role: admin\n        region_name: RegionOne\n        username: octavia\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      test:\n        role: admin\n        region_name: RegionOne\n        username: test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 80\n        internal: 5000\n  load_balancer:\n    name: octavia\n    hosts:\n      default: octavia-api\n      public: octavia\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n      healthcheck: /healthcheck\n    scheme:\n      default: http\n    port:\n      api:\n        default: 9876\n        public: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n      octavia:\n        username: octavia\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /octavia\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_db_persistence:\n    auth:\n      admin:\n        username: root\n        password: password\n      octavia:\n        username: octavia\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /octavia_persistence\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken 
cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n      octavia:\n        username: octavia\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /octavia\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  network:\n    name: neutron\n    hosts:\n      default: neutron-server\n      public: neutron\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 9696\n        public: 80\n  valkey:\n    name: valkey\n    hosts:\n      default: valkey\n    # NOTE(rlin): we should only provide password when not using CA cert.\n    password: null\n    host_fqdn_override:\n      default: null\n    port:\n      server:\n        default: 6379\n        sentinel: 26379\n\npod:\n  probes:\n    api:\n      octavia-api:\n        liveness:\n          enabled: True\n          params: {}\n        readiness:\n          enabled: True\n          params: {}\n  security_context:\n    octavia_api:\n      container:\n        octavia_api:\n          capabilities:\n            add:\n              - SYS_NICE\n    octavia_driver_agent:\n      container:\n        octavia_driver_agent:\n          capabilities:\n            add:\n              - SYS_NICE\n          runAsUser: 42424\n    octavia_worker:\n      container:\n        octavia_worker_nic_init:\n          runAsUser: 0\n    
      capabilities:\n            add:\n              - NET_ADMIN\n              - NET_RAW\n              - NET_BIND_SERVICE\n        octavia_worker:\n          runAsUser: 0\n          capabilities:\n            add:\n              - NET_ADMIN\n    octavia_housekeeping:\n      container:\n        octavia_housekeeping:\n          runAsUser: 42424\n    octavia_health_manager:\n      container:\n        octavia_health_manager_nic_init:\n          runAsUser: 0\n          capabilities:\n            add:\n              - NET_ADMIN\n              - NET_RAW\n              - NET_BIND_SERVICE\n        octavia_health_manager:\n          runAsUser: 0\n          capabilities:\n            add:\n              - NET_ADMIN\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n  mounts:\n    octavia_api:\n      init_container: null\n      octavia_api:\n        volumeMounts:\n        volumes:\n    octavia_driver_agent:\n      init_container: null\n      octavia_driver_agent:\n        volumeMounts:\n        volumes:\n    octavia_worker:\n      init_container: null\n      octavia_worker:\n        volumeMounts:\n        volumes:\n    octavia_housekeeping:\n      init_container: null\n      octavia_housekeeping:\n        volumeMounts:\n        volumes:\n    octavia_health_manager:\n      init_container: null\n      octavia_health_manager:\n        volumeMounts:\n        volumes:\n    octavia_bootstrap:\n      init_container: null\n      octavia_bootstrap:\n        volumeMounts:\n        volumes:\n    octavia_db_sync:\n      init_container: null\n      octavia_db_sync:\n        volumeMounts:\n        volumes:\n  # -- This allows users to add Kubernetes Projected Volumes to be mounted at /etc/octavia/octavia.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/daemonset/cronjob\n  ## 
https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    octavia_api: []\n    octavia_driver_agent: []\n    octavia_worker: []\n    octavia_housekeeping: []\n    octavia_health_manager: []\n    octavia_db_sync: []\n  replicas:\n    api: 1\n    driver_agent: 1\n    housekeeping: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        health_manager:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n    disruption_budget:\n      api:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    driver_agent:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    worker:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    housekeeping:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    health_manager:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: 
\"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nnetwork_policy:\n  octavia:\n    ingress:\n      - {}\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  daemonset_health_manager: true\n  daemonset_worker: true\n  deployment_api: true\n  deployment_driver_agent: true\n  deployment_housekeeping: true\n  ingress_api: true\n  job_bootstrap: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_image_repo_sync: true\n  job_rabbit_init: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  pdb_api: true\n  pod_rally_test: false\n  network_policy: false\n  secret_credential_keys: true\n  secret_db: true\n  secret_db_persistence: true\n  secret_ingress_tls: true\n  secret_keystone: true\n  secret_rabbitmq: true\n  
secret_registry: true\n  service_ingress_api: true\n  service_api: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "openvswitch/.helmignore",
    "content": "values_overrides\n"
  },
  {
    "path": "openvswitch/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm OpenVSwitch\nname: openvswitch\nversion: 2025.2.0\nhome: http://openvswitch.org\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Neutron/OpenStack_Project_Neutron_vertical.png\nsources:\n  - https://github.com/openvswitch/ovs\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "openvswitch/templates/bin/_openvswitch-db-server.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nOVS_DB=/run/openvswitch/conf.db\nOVS_SCHEMA=/usr/share/openvswitch/vswitch.ovsschema\nOVS_PID=/run/openvswitch/ovsdb-server.pid\nOVS_SOCKET=/run/openvswitch/db.sock\n\nfunction start () {\n  mkdir -p \"$(dirname ${OVS_DB})\"\n  if [[ ! -e \"${OVS_DB}\" ]]; then\n    ovsdb-tool create \"${OVS_DB}\"\n  fi\n\n  if [[ \"$(ovsdb-tool needs-conversion ${OVS_DB} ${OVS_SCHEMA})\" == 'yes' ]]; then\n      ovsdb-tool convert ${OVS_DB} ${OVS_SCHEMA}\n  fi\n\n  umask 000\n  exec /usr/sbin/ovsdb-server ${OVS_DB} \\\n          -vconsole:emer \\\n          -vconsole:err \\\n          -vconsole:info \\\n          --pidfile=${OVS_PID} \\\n          --remote=punix:${OVS_SOCKET} \\\n          --remote=db:Open_vSwitch,Open_vSwitch,manager_options \\\n{{- if .Values.conf.openvswitch_db_server.ptcp_port }}\n          --remote=ptcp:{{ .Values.conf.openvswitch_db_server.ptcp_port }} \\\n{{- end }}\n          --private-key=db:Open_vSwitch,SSL,private_key \\\n          --certificate=db:Open_vSwitch,SSL,certificate \\\n          --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert\n}\n\nfunction stop () {\n  PID=$(cat $OVS_PID)\n  ovs-appctl -T1 -t /run/openvswitch/ovsdb-server.${PID}.ctl exit\n}\n\n$COMMAND\n"
  },
  {
    "path": "openvswitch/templates/bin/_openvswitch-vswitchd-init-modules.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nchroot /mnt/host-rootfs modprobe openvswitch\nchroot /mnt/host-rootfs modprobe gre\nchroot /mnt/host-rootfs modprobe vxlan\n\n{{- if .Values.conf.ovs_dpdk.enabled }}\n{{- if hasKey .Values.conf.ovs_dpdk \"driver\"}}\nchroot /mnt/host-rootfs modprobe {{ .Values.conf.ovs_dpdk.driver | quote }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "openvswitch/templates/bin/_openvswitch-vswitchd.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nOVS_SOCKET=/run/openvswitch/db.sock\nOVS_PID=/run/openvswitch/ovs-vswitchd.pid\n\n# Create vhostuser directory and grant nova user (default UID 42424) access\n# permissions.\n{{- if .Values.conf.ovs_dpdk.enabled }}\nmkdir -p /run/openvswitch/{{ .Values.conf.ovs_dpdk.vhostuser_socket_dir }}\nchown {{ .Values.pod.user.nova.uid }}.{{ .Values.pod.user.nova.uid }} /run/openvswitch/{{ .Values.conf.ovs_dpdk.vhostuser_socket_dir }}\nchown {{ .Values.pod.user.nova.uid }}.{{ .Values.pod.user.nova.uid }} {{ .Values.conf.ovs_dpdk.hugepages_mountpath }}\n{{- end }}\n\nfunction start () {\n  t=0\n  while [ ! -e \"${OVS_SOCKET}\" ] ; do\n      echo \"waiting for ovs socket $sock\"\n      sleep 1\n      t=$(($t+1))\n      if [ $t -ge 10 ] ; then\n          echo \"no ovs socket, giving up\"\n          exit 1\n      fi\n  done\n\n  ovs-vsctl --db=unix:${OVS_SOCKET} --no-wait show\n{{- if .Values.conf.ovs_hw_offload.enabled }}\n  ovs-vsctl --db=unix:${OVS_SOCKET} --no-wait set Open_vSwitch . other_config:hw-offload={{ .Values.conf.ovs_hw_offload.enabled }}\n{{- end }}\n{{- if .Values.conf.ovs_other_config.handler_threads }}\n  ovs-vsctl --db=unix:${OVS_SOCKET} --no-wait set Open_vSwitch . 
other_config:n-handler-threads={{ .Values.conf.ovs_other_config.handler_threads }}\n{{- end }}\n{{- if .Values.conf.ovs_other_config.revalidator_threads }}\n  ovs-vsctl --db=unix:${OVS_SOCKET} --no-wait set Open_vSwitch . other_config:n-revalidator-threads={{ .Values.conf.ovs_other_config.revalidator_threads }}\n{{- end }}\n\n{{- if .Values.conf.ovs_dpdk.enabled }}\n    ovs-vsctl --db=unix:${OVS_SOCKET} --no-wait set Open_vSwitch . other_config:dpdk-hugepage-dir={{ .Values.conf.ovs_dpdk.hugepages_mountpath | quote }}\n    ovs-vsctl --db=unix:${OVS_SOCKET} --no-wait set Open_vSwitch . other_config:dpdk-socket-mem={{ .Values.conf.ovs_dpdk.socket_memory | quote }}\n\n{{- if .Values.conf.ovs_dpdk.mem_channels }}\n    ovs-vsctl --db=unix:${OVS_SOCKET} --no-wait set Open_vSwitch . other_config:dpdk-mem-channels={{ .Values.conf.ovs_dpdk.mem_channels | quote }}\n{{- end }}\n\n{{- if hasKey .Values.conf.ovs_dpdk \"pmd_cpu_mask\" }}\n    ovs-vsctl --db=unix:${OVS_SOCKET} --no-wait set Open_vSwitch . other_config:pmd-cpu-mask={{ .Values.conf.ovs_dpdk.pmd_cpu_mask | quote }}\n    PMD_CPU_MASK={{ .Values.conf.ovs_dpdk.pmd_cpu_mask | quote }}\n{{- end }}\n\n{{- if hasKey .Values.conf.ovs_dpdk \"lcore_mask\" }}\n    ovs-vsctl --db=unix:${OVS_SOCKET} --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask={{ .Values.conf.ovs_dpdk.lcore_mask | quote }}\n    LCORE_MASK={{ .Values.conf.ovs_dpdk.lcore_mask | quote }}\n{{- end }}\n\n{{- if hasKey .Values.conf.ovs_dpdk \"vhost_iommu_support\" }}\n    ovs-vsctl --db=unix:${OVS_SOCKET} --no-wait set Open_vSwitch . other_config:vhost-iommu-support={{ .Values.conf.ovs_dpdk.vhost_iommu_support }}\n{{- end }}\n\n    ovs-vsctl --db=unix:${OVS_SOCKET} --no-wait set Open_vSwitch . other_config:vhost-sock-dir={{ .Values.conf.ovs_dpdk.vhostuser_socket_dir | quote }}\n    ovs-vsctl --db=unix:${OVS_SOCKET} --no-wait set Open_vSwitch . 
other_config:dpdk-init=true\n\n  # No need to create the cgroup if lcore_mask or pmd_cpu_mask is not set.\n  if [[ -n ${PMD_CPU_MASK} || -n ${LCORE_MASK} ]]; then\n      if [ \"$(stat -fc %T /sys/fs/cgroup/)\" = \"cgroup2fs\" ]; then\n          # Setup Cgroups to use when breaking out of Kubernetes defined groups\n          mkdir -p /sys/fs/cgroup/osh-openvswitch\n          target_mems=\"/sys/fs/cgroup/osh-openvswitch/cpuset.mems\"\n          target_cpus=\"/sys/fs/cgroup/osh-openvswitch/cpuset.cpus\"\n          touch $target_mems\n          touch $target_cpus\n\n          # Ensure the write target for the for cpuset.mem for the pod exists\n          if [[ -f \"$target_mems\" && -f \"$target_cpus\" ]]; then\n            # Write cpuset.mem and cpuset.cpus for new cgroup and add current task to new cgroup\n{{- if hasKey .Values.conf.ovs_dpdk \"cgroup_cpuset_mems\" }}\n            echo \"{{ .Values.conf.ovs_dpdk.cgroup_cpuset_mems }}\" > \"$target_mems\"\n{{- else }}\n            cat /sys/fs/cgroup/cpuset.mems.effective > \"$target_mems\"\n{{- end }}\n{{- if hasKey .Values.conf.ovs_dpdk \"cgroup_cpuset_cpus\" }}\n            echo \"{{ .Values.conf.ovs_dpdk.cgroup_cpuset_cpus }}\" > \"$target_cpus\"\n{{- else }}\n            cat /sys/fs/cgroup/cpuset.cpus.effective > \"$target_cpus\"\n{{- end }}\n            echo $$ > /sys/fs/cgroup/osh-openvswitch/cgroup.procs\n          else\n            echo \"ERROR: Could not find write target for either cpuset.mems: $target_mems or cpuset.cpus: $target_cpus\"\n          fi\n      else\n          # Setup Cgroups to use when breaking out of Kubernetes defined groups\n          mkdir -p /sys/fs/cgroup/cpuset/osh-openvswitch\n          target_mems=\"/sys/fs/cgroup/cpuset/osh-openvswitch/cpuset.mems\"\n          target_cpus=\"/sys/fs/cgroup/cpuset/osh-openvswitch/cpuset.cpus\"\n\n          # Ensure the write target for the for cpuset.mem for the pod exists\n          if [[ -f \"$target_mems\" && -f \"$target_cpus\" ]]; then\n            
# Write cpuset.mem and cpuset.cpus for new cgroup and add current task to new cgroup\n{{- if hasKey .Values.conf.ovs_dpdk \"cgroup_cpuset_mems\" }}\n            echo \"{{ .Values.conf.ovs_dpdk.cgroup_cpuset_mems }}\" > \"$target_mems\"\n{{- else }}\n            cat /sys/fs/cgroup/cpuset/cpuset.mems > \"$target_mems\"\n{{- end }}\n{{- if hasKey .Values.conf.ovs_dpdk \"cgroup_cpuset_cpus\" }}\n            echo \"{{ .Values.conf.ovs_dpdk.cgroup_cpuset_cpus }}\" > \"$target_cpus\"\n{{- else }}\n            cat /sys/fs/cgroup/cpuset/cpuset.cpus > \"$target_cpus\"\n{{- end }}\n            echo $$ > /sys/fs/cgroup/cpuset/osh-openvswitch/tasks\n          else\n            echo \"ERROR: Could not find write target for either cpuset.mems: $target_mems or cpuset.cpus: $target_cpus\"\n          fi\n      fi\n  fi\n{{- end }}\n\n  exec /usr/sbin/ovs-vswitchd unix:${OVS_SOCKET} \\\n          -vconsole:emer \\\n          -vconsole:err \\\n          -vconsole:info \\\n          --pidfile=${OVS_PID} \\\n          {{- if .Values.conf.ovs_user_name }}\n          --user=\"{{ .Values.conf.ovs_user_name }}\" \\\n          {{- end }}\n          --mlockall\n}\n\nfunction stop () {\n  PID=$(cat $OVS_PID)\n  ovs-appctl -T1 -t /run/openvswitch/ovs-vswitchd.${PID}.ctl exit\n}\n\nfind_latest_ctl_file() {\n    latest_file=\"\"\n    latest_file=$(ls -lt /run/openvswitch/*.ctl | awk 'NR==1 {if ($3 == \"{{ .Values.conf.poststart.rootUser }}\") print $NF}')\n\n    echo \"$latest_file\"\n}\n\nfunction poststart () {\n  # This enables the usage of 'ovs-appctl' from neutron-ovs-agent pod.\n\n  # Wait for potential new ctl file before continuing\n  timeout={{ .Values.conf.poststart.timeout }}\n  start_time=$(date +%s)\n  while true; do\n      latest_ctl_file=$(find_latest_ctl_file)\n      if [ -n \"$latest_ctl_file\" ]; then\n          break\n      fi\n      current_time=$(date +%s)\n      if (( current_time - start_time >= timeout )); then\n          break\n      fi\n      sleep 1\n  done\n\n  until [ 
-f $OVS_PID ]\n  do\n      echo \"Waiting for file $OVS_PID\"\n      sleep 1\n  done\n\n  PID=$(cat $OVS_PID)\n  OVS_CTL=/run/openvswitch/ovs-vswitchd.${PID}.ctl\n\n  until [ -S $OVS_CTL ]\n  do\n      echo \"Waiting for file $OVS_CTL\"\n      sleep 1\n  done\n  chown {{ .Values.pod.user.nova.uid }}.{{ .Values.pod.user.nova.uid }} ${OVS_CTL}\n\n{{- if .Values.conf.poststart.extraCommand }}\n{{ .Values.conf.poststart.extraCommand | indent 2 }}\n{{- end }}\n\n}\n\n$COMMAND\n"
  },
  {
    "path": "openvswitch/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: openvswitch-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  openvswitch-db-server.sh: |\n{{ tuple \"bin/_openvswitch-db-server.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  openvswitch-vswitchd.sh: |\n{{ tuple \"bin/_openvswitch-vswitchd.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  openvswitch-vswitchd-init-modules.sh: |\n{{ tuple \"bin/_openvswitch-vswitchd-init-modules.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "openvswitch/templates/daemonset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ovsdblivenessProbeTemplate\" }}\nexec:\n  command:\n    - /usr/bin/ovs-vsctl\n    - show\n{{- end }}\n\n{{- define \"ovsdbreadinessProbeTemplate\" }}\nexec:\n  command:\n    - /usr/bin/ovs-vsctl\n    - list\n    - Open_Vswitch\n{{- end }}\n\n{{- define \"ovsvswitchlivenessProbeTemplate\" }}\nexec:\n  command:\n{{- if .Values.pod.probes.ovs.ovs_vswitch.liveness.exec }}\n{{ .Values.pod.probes.ovs.ovs_vswitch.liveness.exec | toYaml | indent 4 }}\n{{- else }}\n    - /usr/bin/ovs-appctl\n    - bond/list\n{{- end }}\n{{- end }}\n\n{{- define \"ovsvswitchreadinessProbeTemplate\" }}\nexec:\n  command:\n{{- if .Values.pod.probes.ovs.ovs_vswitch.readiness.exec }}\n{{ .Values.pod.probes.ovs.ovs_vswitch.readiness.exec | toYaml | indent 4 }}\n{{- else if not .Values.conf.ovs_dpdk.enabled }}\n    - /bin/bash\n    - -c\n    - '/usr/bin/ovs-vsctl show'\n{{- else }}\n    - /bin/bash\n    - -c\n    - '/usr/bin/ovs-vsctl show && ! /usr/bin/ovs-vsctl list Open_vSwitch | grep -q dpdk_initialized.*false'\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.daemonset }}\n{{- $envAll := . 
}}\n\n{{- $serviceAccountName := \"openvswitch-server\" }}\n{{ tuple $envAll \"ovs\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: openvswitch\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"openvswitch\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"openvswitch\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"ovs\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"openvswitch\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"openvswitch\" \"containerNames\" (list \"openvswitch-db\" \"openvswitch-db-perms\" \"openvswitch-vswitchd\" \"openvswitch-vswitchd-modules\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"openvswitch\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"openvswitch\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      shareProcessNamespace: true\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"ovs\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      nodeSelector:\n        {{ .Values.labels.ovs.node_selector_key }}: {{ .Values.labels.ovs.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.openvswitch.enabled }}\n{{ tuple $envAll \"openvswitch\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      dnsPolicy: {{ .Values.pod.dns_policy }}\n      hostNetwork: true\n      initContainers:\n{{ tuple $envAll \"ovs\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: openvswitch-db-perms\n{{ tuple $envAll \"openvswitch_db_server\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"ovs\" \"container\" \"perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.ovs.db | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - {{ $envAll.Values.pod.security_context.ovs.container.server.runAsUser | quote }}\n            - /run/openvswitch\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: run-openvswitch\n              mountPath: /run/openvswitch\n        - name: openvswitch-vswitchd-modules\n{{ tuple $envAll \"openvswitch_vswitchd\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"ovs\" \"container\" \"modules\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/openvswitch-vswitchd-init-modules.sh\n          
volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: openvswitch-bin\n              mountPath: /tmp/openvswitch-vswitchd-init-modules.sh\n              subPath: openvswitch-vswitchd-init-modules.sh\n              readOnly: true\n            - name: host-rootfs\n              mountPath: /mnt/host-rootfs\n              mountPropagation: HostToContainer\n              readOnly: true\n      containers:\n        - name: openvswitch-db\n{{ tuple $envAll \"openvswitch_db_server\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"ovs\" \"container\" \"server\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.ovs.db | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"ovs\" \"container\" \"ovs_db\" \"type\" \"liveness\" \"probeTemplate\" (include \"ovsdblivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"ovs\" \"container\" \"ovs_db\" \"type\" \"readiness\" \"probeTemplate\" (include \"ovsdbreadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          command:\n            - /tmp/openvswitch-db-server.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/openvswitch-db-server.sh\n                  - stop\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: openvswitch-bin\n              mountPath: /tmp/openvswitch-db-server.sh\n              subPath: openvswitch-db-server.sh\n              readOnly: true\n            - name: run\n              mountPath: /run\n        - name: openvswitch-vswitchd\n{{/* Run the container in 
priviledged mode due to the need for root\npermissions when we specify --user to run in non-root. */}}\n{{- $_ := set $envAll.Values.pod.security_context.ovs.container.vswitchd \"privileged\" true -}}\n{{- if .Values.conf.ovs_dpdk.enabled }}\n{{/* Limiting CPU cores would severely affect packet throughput\nIt should be handled through lcore and pmd core masks. */}}\n{{- if .Values.pod.resources.enabled }}\n{{ $_ := unset $envAll.Values.pod.resources.ovs.vswitchd.requests \"cpu\" }}\n{{ $_ := unset $envAll.Values.pod.resources.ovs.vswitchd.limits \"cpu\" }}\n{{- end }}\n{{- end }}\n{{ tuple $envAll \"openvswitch_vswitchd\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"ovs\" \"container\" \"vswitchd\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.ovs.vswitchd | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          # ensures this container can speak to the ovs database\n          # successfully before its marked as ready\n{{ dict \"envAll\" $envAll \"component\" \"ovs\" \"container\" \"ovs_vswitch\" \"type\" \"liveness\" \"probeTemplate\" (include \"ovsvswitchlivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"ovs\" \"container\" \"ovs_vswitch\" \"type\" \"readiness\" \"probeTemplate\" (include \"ovsvswitchreadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{- if .Values.pod.tini.enabled }}\n          command:\n            - /tini\n            - -s\n            - --\n          args:\n            - /tmp/openvswitch-vswitchd.sh\n            - start\n{{- else }}\n          command:\n            - /tmp/openvswitch-vswitchd.sh\n            - start\n{{- end }}\n          lifecycle:\n            postStart:\n              exec:\n             
   command:\n                  - /tmp/openvswitch-vswitchd.sh\n                  - poststart\n            preStop:\n              exec:\n                command:\n                  - /tmp/openvswitch-vswitchd.sh\n                  - stop\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: openvswitch-bin\n              mountPath: /tmp/openvswitch-vswitchd.sh\n              subPath: openvswitch-vswitchd.sh\n              readOnly: true\n            - name: run\n              mountPath: /run\n{{- if .Values.conf.ovs_dpdk.enabled }}\n            - name: hugepages\n              mountPath: {{ .Values.conf.ovs_dpdk.hugepages_mountpath | quote }}\n            - name: pci-devices\n              mountPath: /sys/bus/pci/devices\n            - name: huge-pages-kernel\n              mountPath: /sys/kernel/mm/hugepages\n            - name: node-devices\n              mountPath: /sys/devices/system/node\n            - name: modules\n              mountPath: /lib/modules\n            - name: devs\n              mountPath: /dev\n            - name: pci-drivers\n              mountPath: /sys/bus/pci/drivers\n            - name: cgroup\n              mountPath: /sys/fs/cgroup\n            - name: var-tmp\n              mountPath: /var/tmp\n{{- end }}\n        {{- with .Values.openvswitch.extraContainers }}\n          {{- tpl (toYaml .) 
$envAll | nindent 8 }}\n        {{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: openvswitch-bin\n          configMap:\n            name: openvswitch-bin\n            defaultMode: 0555\n        - name: run\n          hostPath:\n            path: /run\n            type: Directory\n        - name: run-openvswitch\n          hostPath:\n            path: /run/openvswitch\n            type: DirectoryOrCreate\n        - name: host-rootfs\n          hostPath:\n            path: /\n            type: Directory\n{{- if .Values.conf.ovs_dpdk.enabled }}\n        - name: devs\n          hostPath:\n            path: /dev\n            type: Directory\n        - name: pci-devices\n          hostPath:\n            path: /sys/bus/pci/devices\n            type: Directory\n        - name: huge-pages-kernel\n          hostPath:\n            path: /sys/kernel/mm/hugepages\n            type: Directory\n        - name: node-devices\n          hostPath:\n            path: /sys/devices/system/node\n            type: Directory\n        - name: modules\n          hostPath:\n            path: /lib/modules\n            type: Directory\n        - name: pci-drivers\n          hostPath:\n            path: /sys/bus/pci/drivers\n            type: Directory\n        - name: hugepages\n          hostPath:\n            path: {{ .Values.conf.ovs_dpdk.hugepages_mountpath | quote }}\n            type: Directory\n        - name: cgroup\n          hostPath:\n            path: /sys/fs/cgroup\n        - name: var-tmp\n          hostPath:\n            path: /var/tmp\n            type: DirectoryOrCreate\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "openvswitch/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "openvswitch/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"openvswitch\" -}}\n{{- if .Values.pod.tolerations.openvswitch.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "openvswitch/templates/network-policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"openvswitch\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "openvswitch/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "openvswitch/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for openvswitch.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nimages:\n  tags:\n    openvswitch_db_server: quay.io/airshipit/openvswitch:latest-ubuntu_noble\n    openvswitch_vswitchd: quay.io/airshipit/openvswitch:latest-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  ovs:\n    node_selector_key: openvswitch\n    node_selector_value: enabled\n\npod:\n  tini:\n    enabled: true\n  tolerations:\n    openvswitch:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  probes:\n    ovs:\n      ovs_db:\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 60\n            periodSeconds: 30\n            timeoutSeconds: 5\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 90\n            periodSeconds: 30\n            timeoutSeconds: 5\n      ovs_vswitch:\n        liveness:\n         
 enabled: true\n          params:\n            initialDelaySeconds: 60\n            periodSeconds: 30\n            timeoutSeconds: 5\n        readiness:\n          enabled: true\n          params:\n            failureThreshold: 3\n            periodSeconds: 10\n            timeoutSeconds: 1\n  security_context:\n    ovs:\n      pod:\n        runAsUser: 42424\n      container:\n        perms:\n          runAsUser: 0\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        server:\n          runAsUser: 42424\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        modules:\n          runAsUser: 0\n          capabilities:\n            add:\n              - SYS_MODULE\n              - SYS_CHROOT\n          readOnlyRootFilesystem: true\n        vswitchd:\n          runAsUser: 0\n          capabilities:\n            add:\n              - NET_ADMIN\n          readOnlyRootFilesystem: true\n  dns_policy: \"ClusterFirstWithHostNet\"\n\n  lifecycle:\n    upgrades:\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        ovs:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n  resources:\n    enabled: false\n    ovs:\n      db:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      vswitchd:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n          # set resources to enabled and specify one of the following when using dpdk\n          # hugepages-1Gi: \"1Gi\"\n          # hugepages-2Mi: \"512Mi\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  user:\n    nova:\n      uid: 42424\n\nsecrets:\n  oci_image_registry:\n    
openvswitch: openvswitch-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      openvswitch:\n        username: openvswitch\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n\nnetwork_policy:\n  openvswitch:\n    ingress:\n      - {}\n    egress:\n      - {}\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - openvswitch-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    ovs: null\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nmanifests:\n  configmap_bin: true\n  daemonset: true\n  daemonset_ovs_vswitchd: true\n  job_image_repo_sync: true\n  network_policy: false\n  secret_registry: true\n\nopenvswitch:\n  extraContainers: []\n\nconf:\n  poststart:\n    timeout: 5\n    rootUser: \"root\"\n    extraCommand: null\n  openvswitch_db_server:\n    ptcp_port: null\n  ovs_other_config:\n    handler_threads: null\n    revalidator_threads: null\n  ovs_hw_offload:\n    enabled: false\n  ovs_dpdk:\n    enabled: false\n    ## Mandatory parameters. 
Please uncomment when enabling DPDK\n    # socket_memory: 1024\n    # hugepages_mountpath: /dev/hugepages\n    # vhostuser_socket_dir: vhostuser\n    #\n    ## Optional hardware specific parameters: modify to match NUMA topology\n    # mem_channels: 4\n    # lcore_mask: 0x1\n    # pmd_cpu_mask: 0x4\n    #\n    ## Optional driver to use. Driver name should be the same as the one\n    ## specified in the ovs_dpdk section in the Neutron values and vice versa\n    # driver: vfio-pci\n    #\n    ## Optional security feature\n    #     vHost IOMMU feature restricts the vhost memory that a virtio device\n    #     access, available with DPDK v17.11\n    # vhost_iommu_support: true\n    #\n    ## Optional cgroups cpuset mems/cpus override\n    ## The default is to copy the values from root cgroup, cpuset.mems.effective\n    ## and cpuset.cpus.effective.\n    ## Note: cgroup only created if lcore_mask or pmd_cpu_mask is set\n    # cgroup_cpuset_mems: some_list_of_memory_nodes\n    # cgroup_cpuset_cpus: some_list_of_cpus\n  ## OVS supports run in non-root for both OVS and OVS DPDK mode, the user\n  # for OVS need to be added to container image with user id 42424.\n  # useradd -u 42424 openvswitch; groupmod -g 42424 openvswitch\n  #\n  # Leave empty to run as user that invokes the command (default: root)\n  ovs_user_name: \"openvswitch:openvswitch\"\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     
secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "ovn/.helmignore",
    "content": "values_overrides\n"
  },
  {
    "path": "ovn/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v23.3.0\ndescription: OpenStack-Helm OVN\nname: ovn\nversion: 2025.2.0\nhome: https://www.ovn.org\nicon: https://www.ovn.org/images/ovn-logo.png\nsources:\n  - https://github.com/ovn-org/ovn\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "ovn/templates/bin/_ovn-controller-init.sh.tpl",
    "content": "#!/bin/bash -xe\n\n# Copyright 2023 VEXXHOST, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nANNOTATION_KEY=\"openstack-helm/ovn-system-id\"\n\nfunction get_ip_address_from_interface {\n  local interface=$1\n  local ip=$(ip -4 -o addr s \"${interface}\" | awk '{ print $4; exit }' | awk -F '/' 'NR==1 {print $1}')\n  if [ -z \"${ip}\" ] ; then\n    exit 1\n  fi\n  echo ${ip}\n}\n\nfunction get_ip_prefix_from_interface {\n  local interface=$1\n  local prefix=$(ip -4 -o addr s \"${interface}\" | awk '{ print $4; exit }' | awk -F '/' 'NR==1 {print $2}')\n  if [ -z \"${prefix}\" ] ; then\n    exit 1\n  fi\n  echo ${prefix}\n}\n\nfunction migrate_ip_from_nic {\n  src_nic=$1\n  bridge_name=$2\n\n  # Enabling explicit error handling: We must avoid to lose the IP\n  # address in the migration process. Hence, on every error, we\n  # attempt to assign the IP back to the original NIC and exit.\n  set +e\n\n  ip=$(get_ip_address_from_interface ${src_nic})\n  prefix=$(get_ip_prefix_from_interface ${src_nic})\n\n  bridge_ip=$(get_ip_address_from_interface \"${bridge_name}\")\n  bridge_prefix=$(get_ip_prefix_from_interface \"${bridge_name}\")\n\n  ip link set ${bridge_name} up\n\n  if [[ -n \"${ip}\" && -n \"${prefix}\" ]]; then\n    ip addr flush dev ${src_nic}\n    if [ $? 
-ne 0 ] ; then\n      ip addr replace ${ip}/${prefix} dev ${src_nic}\n      echo \"Error while flushing IP from ${src_nic}.\"\n      exit 1\n    fi\n\n    ip addr replace ${ip}/${prefix} dev \"${bridge_name}\"\n    if [ $? -ne 0 ] ; then\n      echo \"Error assigning IP to bridge \"${bridge_name}\".\"\n      ip addr replace ${ip}/${prefix} dev ${src_nic}\n      exit 1\n    fi\n  elif [[ -n \"${bridge_ip}\" && -n \"${bridge_prefix}\" ]]; then\n    echo \"Bridge '${bridge_name}' already has IP assigned. Keeping the same:: IP:[${bridge_ip}]; Prefix:[${bridge_prefix}]...\"\n  elif [[ -z \"${bridge_ip}\" && -z \"${ip}\" ]]; then\n    echo \"Interface and bridge have no ips configured. Leaving as is.\"\n  else\n    echo \"Interface ${src_nic} has invalid IP address. IP:[${ip}]; Prefix:[${prefix}]...\"\n    exit 1\n  fi\n\n  set -e\n}\n\nfunction get_current_system_id {\n  ovs-vsctl --if-exists get Open_vSwitch . external_ids:system-id | tr -d '\"'\n}\n\nfunction get_stored_system_id {\n  kubectl get node \"$NODE_NAME\" -o \"jsonpath={.metadata.annotations.openstack-helm/ovn-system-id}\"\n}\n\nfunction store_system_id() {\n  local system_id=$1\n  kubectl annotate node \"$NODE_NAME\" \"$ANNOTATION_KEY=$system_id\"\n}\n\n# Detect tunnel interface\ntunnel_interface=\"{{- .Values.network.interface.tunnel -}}\"\nif [ -z \"${tunnel_interface}\" ] ; then\n    # search for interface with tunnel network routing\n    tunnel_network_cidr=\"{{- .Values.network.interface.tunnel_network_cidr -}}\"\n    if [ -z \"${tunnel_network_cidr}\" ] ; then\n        tunnel_network_cidr=\"0/0\"\n    fi\n    # If there is not tunnel network gateway, exit\n    tunnel_interface=$(ip -4 route list ${tunnel_network_cidr} | awk -F 'dev' '{ print $2; exit }' \\\n        | awk '{ print $1 }') || exit 1\nfi\novs-vsctl set open . 
external_ids:ovn-encap-ip=\"$(get_ip_address_from_interface ${tunnel_interface})\"\n\n# Get the stored system-id from the Kubernetes node annotation\nstored_system_id=$(get_stored_system_id)\n\n# Get the current system-id set in OVS\ncurrent_system_id=$(get_current_system_id)\n\nif [ -n \"$stored_system_id\" ] && [ \"$stored_system_id\" != \"$current_system_id\" ]; then\n  # If the annotation exists and does not match the current system-id, set the system-id to the stored one\n  ovs-vsctl set Open_vSwitch . external_ids:system-id=\"$stored_system_id\"\nelif [ -z \"$current_system_id\" ]; then\n  # If no current system-id is set, generate a new one\n  current_system_id=$(uuidgen)\n  ovs-vsctl set Open_vSwitch . external_ids:system-id=\"$current_system_id\"\n  # Store the new system-id in the Kubernetes node annotation\n  store_system_id \"$current_system_id\"\nelif [ -z \"$stored_system_id\" ]; then\n  # If there is no stored system-id, store the current one\n  store_system_id \"$current_system_id\"\nfi\n\n# Configure OVN remote\n{{- if empty .Values.conf.ovn_remote -}}\n{{- $sb_svc_name := \"ovn-ovsdb-sb\" -}}\n{{- $sb_svc := (tuple $sb_svc_name \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\") -}}\n{{- $sb_port := (tuple \"ovn-ovsdb-sb\" \"internal\" \"ovsdb\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\") -}}\n{{- $sb_service_list := list -}}\n{{- range $i := until (.Values.pod.replicas.ovn_ovsdb_sb | int) -}}\n  {{- $sb_service_list = printf \"tcp:%s-%d.%s:%s\" $sb_svc_name $i $sb_svc $sb_port | append $sb_service_list -}}\n{{- end }}\n\novs-vsctl set open . external-ids:ovn-remote=\"{{ include \"helm-toolkit.utils.joinListWithComma\" $sb_service_list }}\"\n{{- else }}\novs-vsctl set open . external-ids:ovn-remote=\"{{ .Values.conf.ovn_remote }}\"\n{{- end }}\n\n# Configure OVN values\novs-vsctl set open . external-ids:rundir=\"/var/run/openvswitch\"\novs-vsctl set open . 
external-ids:ovn-encap-type=\"{{ .Values.conf.ovn_encap_type }}\"\novs-vsctl set open . external-ids:ovn-bridge=\"{{ .Values.conf.ovn_bridge }}\"\novs-vsctl set open . external-ids:ovn-bridge-mappings=\"{{ .Values.conf.ovn_bridge_mappings }}\"\novs-vsctl set open . external-ids:ovn-monitor-all=\"{{ .Values.conf.ovn_monitor_all }}\"\n\nGW_ENABLED=$(cat /tmp/gw-enabled/gw-enabled)\nif [[ ${GW_ENABLED} == {{ .Values.labels.ovn_controller_gw.node_selector_value }} ]]; then\n  ovs-vsctl set open . external-ids:ovn-cms-options={{ .Values.conf.ovn_cms_options_gw_enabled }}\nelse\n  ovs-vsctl set open . external-ids:ovn-cms-options={{ .Values.conf.ovn_cms_options }}\nfi\n\n{{ if .Values.conf.ovn_bridge_datapath_type -}}\novs-vsctl set open . external-ids:ovn-bridge-datapath-type=\"{{ .Values.conf.ovn_bridge_datapath_type }}\"\n{{- end }}\n\n# Configure hostname\n{{- if .Values.pod.use_fqdn.compute }}\n  ovs-vsctl set open . external-ids:hostname=\"$(hostname -f)\"\n{{- else }}\n  ovs-vsctl set open . external-ids:hostname=\"$(hostname)\"\n{{- end }}\n\n# Create bridges and create ports\n# handle any bridge mappings\n# /tmp/auto_bridge_add is one line json file: {\"br-ex1\":\"eth1\",\"br-ex2\":\"eth2\"}\nfor bmap in `sed 's/[{}\"]//g' /tmp/auto_bridge_add | tr \",\" \"\\n\"`\ndo\n  bridge=${bmap%:*}\n  iface=${bmap#*:}\n  ovs-vsctl --may-exist add-br $bridge -- set bridge $bridge protocols=OpenFlow13\n  if [ -n \"$iface\" ] && [ \"$iface\" != \"null\" ] && ( ip link show $iface 1>/dev/null 2>&1 );\n  then\n    ovs-vsctl --may-exist add-port $bridge $iface\n    migrate_ip_from_nic $iface $bridge\n  fi\ndone\n"
  },
  {
    "path": "ovn/templates/bin/_ovn-network-logging-parser.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec uwsgi --ini /etc/neutron/neutron-ovn-network-logging-parser-uwsgi.ini\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "ovn/templates/clusterrole-controller.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: ovn-controller\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - nodes\n  verbs:\n  - get\n  - patch\n  - list\n"
  },
  {
    "path": "ovn/templates/clusterrolebinding-controller.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: ovn-controller\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: ovn-controller\nsubjects:\n- kind: ServiceAccount\n  name: ovn-controller\n  namespace: {{ .Release.Namespace }}\n- kind: ServiceAccount\n  name: ovn-controller-gw\n  namespace: {{ .Release.Namespace }}\n"
  },
  {
    "path": "ovn/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ovn.configmap.bin\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ $configMapName }}\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  ovn-controller-init.sh: |\n{{ tuple \"bin/_ovn-controller-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ovn-network-logging-parser.sh: |\n{{ tuple \"bin/_ovn-network-logging-parser.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- list \"ovn-bin\" . | include \"ovn.configmap.bin\" }}\n{{- end }}\n"
  },
  {
    "path": "ovn/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ovn.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if empty (index .Values.conf.ovn_network_logging_parser_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"ovn_logging_parser\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.ovn_network_logging_parser_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $configMapName }}\ntype: Opaque\ndata:\n  auto_bridge_add: {{ toJson $envAll.Values.conf.auto_bridge_add | b64enc }}\n  neutron-ovn-network-logging-parser-uwsgi.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.ovn_network_logging_parser_uwsgi | b64enc }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- list \"ovn-etc\" . | include \"ovn.configmap.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "ovn/templates/daemonset-controller.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"controllerReadinessProbeTemplate\" }}\nexec:\n  command:\n    - /usr/bin/ovn-kube-util\n    - readiness-probe\n    - -t\n    - ovn-controller\n{{- end }}\n\n{{- define \"ovn.daemonset\" }}\n{{- $daemonset := index . 0 }}\n{{- $configMapName := index . 1 }}\n{{- $serviceAccountName := index . 2 }}\n{{- $envAll := index . 3 }}\n{{- with $envAll }}\n\n{{- $env_ovn := .Values.pod.envs }}\n\n---\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n  name: ovn-controller\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n    configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n  labels:\n{{ tuple $envAll \"ovn\" \"ovn-controller\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ovn\" \"ovn-controller\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ovn\" \"ovn-controller\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      hostNetwork: true\n      hostPID: true\n      hostIPC: true\n      dnsPolicy: {{ .Values.pod.dns_policy }}\n      nodeSelector:\n        {{ .Values.labels.ovn_controller.node_selector_key }}: {{ .Values.labels.ovn_controller.node_selector_value }}\n      initContainers:\n{{- tuple $envAll \"ovn_controller\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: get-gw-enabled\n{{ tuple $envAll \"ovn_controller_kubectl\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - |\n              kubectl get node ${NODENAME} -o jsonpath='{.metadata.labels.{{ .Values.labels.ovn_controller_gw.node_selector_key }}}' > /tmp/gw-enabled/gw-enabled\n          env:\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n          volumeMounts:\n            - name: gw-enabled\n              mountPath: /tmp/gw-enabled\n              readOnly: false\n        - name: controller-init\n{{ dict \"envAll\" $envAll \"application\" \"ovn_controller\" \"container\" \"controller_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ tuple $envAll \"ovn_controller\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          command:\n            - /tmp/ovn-controller-init.sh\n          env:\n            - name: NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n          volumeMounts:\n            - name: ovn-bin\n              mountPath: /tmp/ovn-controller-init.sh\n              subPath: ovn-controller-init.sh\n              readOnly: true\n            - name: run-openvswitch\n              mountPath: /run/openvswitch\n            - name: ovn-etc\n              
mountPath: /tmp/auto_bridge_add\n              subPath: auto_bridge_add\n              readOnly: true\n            - name: gw-enabled\n              mountPath: /tmp/gw-enabled\n              readOnly: true\n      containers:\n        - name: controller\n{{ tuple $envAll \"ovn_controller\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.ovn_controller | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"ovn_controller\" \"container\" \"controller\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /root/ovnkube.sh\n            - ovn-controller\n{{ dict \"envAll\" . \"component\" \"ovn_controller\" \"container\" \"controller\" \"type\" \"readiness\" \"probeTemplate\" (include \"controllerReadinessProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          env:\n            - name: OVS_USER_ID\n              value: {{ .Values.conf.ovs_user_name }}\n{{ if $env_ovn.common }}{{ toYaml $env_ovn.common | indent 12 }}{{ end }}\n{{ if $env_ovn.controller }}{{ toYaml $env_ovn.controller | indent 12 }}{{ end }}\n          volumeMounts:\n            - name: run-openvswitch\n              mountPath: /run/openvswitch\n            - name: logs\n              mountPath: /var/log/ovn\n            - name: run-openvswitch\n              mountPath: /run/ovn\n        {{- if .Values.pod.sidecars.vector }}\n        - name: vector\n{{ tuple $envAll \"vector\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.vector | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"ovn_controller\" \"container\" \"vector\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - 
vector\n            - --config\n            - /etc/vector/vector.toml\n          volumeMounts:\n            - name: vector-config\n              mountPath: /etc/vector\n            - name: logs\n              mountPath: /logs\n            - name: vector-data\n              mountPath: /var/lib/vector\n        {{- end }}\n        {{- if .Values.pod.sidecars.ovn_logging_parser }}\n        - name: log-parser\n{{ tuple $envAll \"ovn_logging_parser\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.ovn_logging_parser | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"ovn_controller\" \"container\" \"ovn_logging_parser\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/ovn-network-logging-parser.sh\n            - start\n          env:\n            - name: VECTOR_HTTP_ENDPOINT\n              value: http://localhost:5001\n          ports:\n            - name: http\n              containerPort: {{ tuple \"ovn_logging_parser\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              protocol: TCP\n          volumeMounts:\n            - name: neutron-etc\n              mountPath: /etc/neutron/neutron.conf\n              subPath: neutron.conf\n              readOnly: true\n            - name: ovn-bin\n              mountPath: /tmp/ovn-network-logging-parser.sh\n              subPath: ovn-network-logging-parser.sh\n              readOnly: true\n            - name: ovn-etc\n              mountPath: /etc/neutron/neutron-ovn-network-logging-parser-uwsgi.ini\n              subPath: neutron-ovn-network-logging-parser-uwsgi.ini\n              readOnly: true\n        {{- end }}\n      volumes:\n        - name: ovn-bin\n          configMap:\n            name: ovn-bin\n            defaultMode: 0777\n        - name: run-openvswitch\n          hostPath:\n            path: /run/openvswitch\n            type: DirectoryOrCreate\n        - name: ovn-etc\n          secret:\n            secretName: {{ $configMapName }}\n            defaultMode: 0444\n        - name: logs\n          hostPath:\n            path: /var/log/ovn\n            type: DirectoryOrCreate\n        - name: run-ovn\n          hostPath:\n            path: /run/ovn\n            type: DirectoryOrCreate\n        - name: gw-enabled\n          emptyDir: {}\n        {{- if .Values.pod.sidecars.vector }}\n        - name: vector-config\n          secret:\n            secretName: ovn-vector-config\n        - name: vector-data\n          emptyDir: {}\n        {{- end }}\n        {{- if .Values.pod.sidecars.ovn_logging_parser }}\n        - name: neutron-etc\n          secret:\n            secretName: neutron-etc\n            defaultMode: 0444\n        {{- end }}\n{{- end }}\n{{- end }}\n\n\n{{- if .Values.manifests.daemonset_ovn_controller }}\n{{- $envAll := . 
}}\n{{- $daemonset := \"controller\" }}\n{{- $configMapName := \"ovn-etc\" }}\n{{- $serviceAccountName := \"ovn-controller\" }}\n\n{{ tuple $envAll \"ovn_controller\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n{{- $configmap_yaml := \"ovn.configmap.etc\" }}\n\n{{/* Prefer using .Values.overrides rather than .Values.conf.overrides */}}\n{{- list $daemonset \"ovn.daemonset\" $serviceAccountName $configmap_yaml $configMapName \"ovn.configmap.bin\" \"ovn-bin\" . | include \"helm-toolkit.utils.daemonset_overrides_root\" }}\n\n{{- $serviceAccountNamespace := $envAll.Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: ovn-controller-list-nodes-role-{{ $serviceAccountNamespace }}\nrules:\n- apiGroups: [\"\"]\n  resources: [\"nodes\"]\n  verbs: [\"list\", \"get\"]\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: ovn-controller-list-nodes-rolebinding-{{ $serviceAccountNamespace }}\nsubjects:\n- kind: ServiceAccount\n  name: {{ $serviceAccountName }}\n  namespace: {{ $serviceAccountNamespace }}\nroleRef:\n  kind: ClusterRole\n  name: ovn-controller-list-nodes-role-{{ $serviceAccountNamespace }}\n  apiGroup: rbac.authorization.k8s.io\n\n{{- end }}\n\n"
  },
  {
    "path": "ovn/templates/deployment-northd.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"northdReadinessProbeTemplate\" }}\nexec:\n  command:\n    - /usr/bin/ovn-kube-util\n    - readiness-probe\n    - -t\n    - ovn-northd\n{{- end }}\n\n{{- if .Values.manifests.deployment_northd }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ovn-northd\" }}\n{{ tuple $envAll \"ovn_northd\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $env_ovn := .Values.pod.envs }}\n\n---\nkind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: ovn-northd\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ovn\" \"ovn-northd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.ovn_northd }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ovn\" \"ovn-northd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ovn\" \"ovn-northd\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.ovn_northd.node_selector_key }}: {{ .Values.labels.ovn_northd.node_selector_value }}\n      initContainers:\n{{- tuple $envAll \"ovn_northd\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: northd\n          command:\n            - /root/ovnkube.sh\n            - run-ovn-northd\n{{ tuple $envAll \"ovn_northd\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.ovn_northd | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"ovn_northd\" \"container\" \"northd\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"ovn_northd\" \"container\" \"northd\" \"type\" \"readiness\" \"probeTemplate\" (include \"northdReadinessProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"ovn_northd\" \"container\" \"northd\" \"type\" \"liveness\" \"probeTemplate\" (include \"northdReadinessProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          env:\n{{ if $env_ovn.common }}{{ toYaml $env_ovn.common | indent 12 }}{{ end }}\n{{ if $env_ovn.northd }}{{ toYaml $env_ovn.northd | indent 12 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "ovn/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "ovn/templates/role-controller.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: ovn-controller\n  namespace: {{ .Release.Namespace }}\nrules:\n- apiGroups:\n  - discovery.k8s.io\n  resources:\n  - endpointslices\n  verbs:\n  - list\n"
  },
  {
    "path": "ovn/templates/role-northd.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: ovn-northd\n  namespace: {{ .Release.Namespace }}\nrules:\n- apiGroups:\n  - discovery.k8s.io\n  resources:\n  - endpointslices\n  verbs:\n  - list\n"
  },
  {
    "path": "ovn/templates/role-ovsdb.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: ovn-ovsdb\n  namespace: {{ .Release.Namespace }}\nrules:\n- apiGroups:\n  - \"apps\"\n  resources:\n  - statefulsets\n  verbs:\n  - get\n- apiGroups:\n  - \"\"\n  resources:\n  - pods\n  - endpoints\n  verbs:\n  - list\n  - get\n"
  },
  {
    "path": "ovn/templates/rolebinding-controller.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: ovn-controller\n  namespace: {{ .Release.Namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: ovn-controller\nsubjects:\n- kind: ServiceAccount\n  name: ovn-controller\n  namespace: {{ .Release.Namespace }}\n- kind: ServiceAccount\n  name: ovn-controller-gw\n  namespace: {{ .Release.Namespace }}\n"
  },
  {
    "path": "ovn/templates/rolebinding-northd.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: ovn-northd\n  namespace: {{ .Release.Namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: ovn-northd\nsubjects:\n- kind: ServiceAccount\n  name: ovn-northd\n  namespace: {{ .Release.Namespace }}\n"
  },
  {
    "path": "ovn/templates/rolebinding-ovsdb.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: ovn-ovsdb\n  namespace: {{ .Release.Namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: ovn-ovsdb\nsubjects:\n- kind: ServiceAccount\n  name: ovn-ovsdb-nb\n  namespace: {{ .Release.Namespace }}\n- kind: ServiceAccount\n  name: ovn-ovsdb-sb\n  namespace: {{ .Release.Namespace }}\n"
  },
  {
    "path": "ovn/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "ovn/templates/secret-vector.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.pod.sidecars.vector }}\n{{- $envAll := . }}\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: ovn-vector-config\ntype: Opaque\ndata:\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.vector \"key\" \"vector.toml\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "ovn/templates/service-ovsdb-nb.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_ovn_ovsdb_nb }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"ovn-ovsdb-nb\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  publishNotReadyAddresses: true\n  ports:\n    - name: ovsdb\n      port: {{ tuple \"ovn-ovsdb-nb\" \"internal\" \"ovsdb\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    - name: raft\n      port: {{ tuple \"ovn-ovsdb-nb\" \"internal\" \"raft\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"ovn\" \"ovn-ovsdb-nb\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "ovn/templates/service-ovsdb-sb.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_ovn_ovsdb_sb }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"ovn-ovsdb-sb\" \"direct\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  publishNotReadyAddresses: true\n  ports:\n    - name: ovsdb\n      port: {{ tuple \"ovn-ovsdb-sb\" \"internal\" \"ovsdb\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    - name: raft\n      port: {{ tuple \"ovn-ovsdb-sb\" \"internal\" \"raft\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"ovn\" \"ovn-ovsdb-sb\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "ovn/templates/statefulset-ovsdb-nb.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ovnnbReadinessProbeTemplate\" }}\nexec:\n  command:\n    - /usr/bin/ovn-kube-util\n    - readiness-probe\n    - -t\n{{- if gt (int .Values.pod.replicas.ovn_ovsdb_nb) 1 }}\n    - ovnnb-db-raft\n{{- else }}\n    - ovnnb-db\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.statefulset_ovn_ovsdb_nb }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ovn-ovsdb-nb\" }}\n{{ tuple $envAll \"ovn_ovsdb_nb\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $env_ovn := .Values.pod.envs }}\n\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: ovn-ovsdb-nb\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ovn\" \"ovn-ovsdb-nb\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: {{ tuple \"ovn-ovsdb-nb\" \"direct\" . 
| include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  podManagementPolicy: Parallel\n  replicas: {{ .Values.pod.replicas.ovn_ovsdb_nb }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ovn\" \"ovn-ovsdb-nb\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ovn\" \"ovn-ovsdb-nb\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{- tuple $envAll \"ovn\" \"ovn-ovsdb-nb\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.ovn_ovsdb_nb.node_selector_key }}: {{ .Values.labels.ovn_ovsdb_nb.node_selector_value }}\n      initContainers:\n{{- tuple $envAll \"ovn_ovsdb_nb\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: ovsdb\n          command:\n            - /root/ovnkube.sh\n{{- if gt (int .Values.pod.replicas.ovn_ovsdb_nb) 1 }}\n            - nb-ovsdb-raft\n{{- else }}\n            - nb-ovsdb\n{{- end }}\n{{ tuple $envAll \"ovn_ovsdb_nb\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.ovn_ovsdb_nb | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"ovn_ovsdb_nb\" \"container\" \"ovsdb\" \"type\" \"readiness\" \"probeTemplate\" (include \"ovnnbReadinessProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          ports:\n            - containerPort: {{ tuple \"ovn-ovsdb-nb\" \"internal\" \"ovsdb\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            - containerPort: {{ tuple \"ovn-ovsdb-nb\" \"internal\" \"raft\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          env:\n            - name: OVN_NB_PORT\n              value: {{ tuple \"ovn-ovsdb-nb\" \"internal\" \"ovsdb\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: OVN_NB_RAFT_PORT\n              value: {{ tuple \"ovn-ovsdb-nb\" \"internal\" \"raft\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n{{ if $env_ovn.common }}{{ toYaml $env_ovn.common | indent 12 }}{{ end }}\n{{ if $env_ovn.ovsdb_nb }}{{ toYaml $env_ovn.ovsdb_nb | indent 12 }}{{ end }}\n          volumeMounts:\n            - name: run-openvswitch\n              mountPath: /var/run/openvswitch\n            - name: run-openvswitch\n              mountPath: /var/run/ovn\n            - name: data\n              mountPath: {{ $envAll.Values.volume.ovn_ovsdb_nb.path }}\n      volumes:\n        - name: run-openvswitch\n          hostPath:\n            path: /run/openvswitch\n            type: DirectoryOrCreate\n{{- if not .Values.volume.ovn_ovsdb_nb.enabled }}\n        - name: data\n          emptyDir: {}\n{{- else }}\n  volumeClaimTemplates:\n    - apiVersion: v1\n      kind: PersistentVolumeClaim\n      metadata:\n        name: data\n      spec:\n        accessModes: [\"ReadWriteOnce\"]\n        storageClassName: {{ $envAll.Values.volume.ovn_ovsdb_nb.class_name }}\n        resources:\n          requests:\n            storage: {{ $envAll.Values.volume.ovn_ovsdb_nb.size }}\n{{- end }}\n\n{{- end }}\n"
  },
  {
    "path": "ovn/templates/statefulset-ovsdb-sb.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"ovnsbReadinessProbeTemplate\" }}\nexec:\n  command:\n    - /usr/bin/ovn-kube-util\n    - readiness-probe\n    - -t\n{{- if gt (int .Values.pod.replicas.ovn_ovsdb_sb) 1 }}\n    - ovnsb-db-raft\n{{- else }}\n    - ovnsb-db\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.statefulset_ovn_ovsdb_sb }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"ovn-ovsdb-sb\" }}\n{{ tuple $envAll \"ovn_ovsdb_sb\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $env_ovn := .Values.pod.envs }}\n\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: ovn-ovsdb-sb\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"ovn\" \"ovn-ovsdb-sb\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: {{ tuple \"ovn-ovsdb-sb\" \"direct\" . 
| include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  podManagementPolicy: Parallel\n  replicas: {{ .Values.pod.replicas.ovn_ovsdb_sb }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"ovn\" \"ovn-ovsdb-sb\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"ovn\" \"ovn-ovsdb-sb\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{- tuple $envAll \"ovn\" \"ovn-ovsdb-sb\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.ovn_ovsdb_sb.node_selector_key }}: {{ .Values.labels.ovn_ovsdb_sb.node_selector_value }}\n      initContainers:\n{{- tuple $envAll \"ovn_ovsdb_sb\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: ovsdb\n          command:\n            - /root/ovnkube.sh\n{{- if gt (int .Values.pod.replicas.ovn_ovsdb_sb) 1 }}\n            - sb-ovsdb-raft\n{{- else }}\n            - sb-ovsdb\n{{- end }}\n{{ tuple $envAll \"ovn_ovsdb_sb\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.ovn_ovsdb_sb | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"ovn_ovsdb_sb\" \"container\" \"ovsdb\" \"type\" \"readiness\" \"probeTemplate\" (include \"ovnsbReadinessProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          ports:\n            - containerPort: {{ tuple \"ovn-ovsdb-sb\" \"internal\" \"ovsdb\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            - containerPort: {{ tuple \"ovn-ovsdb-sb\" \"internal\" \"raft\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          env:\n            - name: OVN_SB_PORT\n              value: {{ tuple \"ovn-ovsdb-sb\" \"internal\" \"ovsdb\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: OVN_SB_RAFT_PORT\n              value: {{ tuple \"ovn-ovsdb-sb\" \"internal\" \"raft\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n{{ if $env_ovn.common }}{{ toYaml $env_ovn.common | indent 12 }}{{ end }}\n{{ if $env_ovn.ovsdb_sb }}{{ toYaml $env_ovn.ovsdb_sb | indent 12 }}{{ end }}\n          volumeMounts:\n            - name: run-openvswitch\n              mountPath: /var/run/openvswitch\n            - name: run-openvswitch\n              mountPath: /var/run/ovn\n            - name: data\n              mountPath: {{ $envAll.Values.volume.ovn_ovsdb_sb.path }}\n      volumes:\n        - name: run-openvswitch\n          hostPath:\n            path: /run/openvswitch\n            type: DirectoryOrCreate\n{{- if not .Values.volume.ovn_ovsdb_sb.enabled }}\n        - name: data\n          emptyDir: {}\n{{- else }}\n  volumeClaimTemplates:\n    - apiVersion: v1\n      kind: PersistentVolumeClaim\n      metadata:\n        name: data\n      spec:\n        accessModes: [\"ReadWriteOnce\"]\n        storageClassName: {{ $envAll.Values.volume.ovn_ovsdb_sb.class_name }}\n        resources:\n          requests:\n            storage: {{ $envAll.Values.volume.ovn_ovsdb_sb.size }}\n{{- end }}\n\n{{- end }}\n"
  },
  {
    "path": "ovn/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for openvswitch.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nimages:\n  tags:\n    ovn_ovsdb_nb: quay.io/airshipit/ovn:ubuntu_noble\n    ovn_ovsdb_sb: quay.io/airshipit/ovn:ubuntu_noble\n    ovn_northd: quay.io/airshipit/ovn:ubuntu_noble\n    ovn_controller: quay.io/airshipit/ovn:ubuntu_noble\n    ovn_controller_kubectl: quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n    vector: docker.io/timberio/vector:0.51.1-debian\n    ovn_logging_parser: quay.io/airshipit/neutron:2025.1-ubuntu_noble\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  ovn_ovsdb_nb:\n    node_selector_key: openstack-network-node\n    node_selector_value: enabled\n  ovn_ovsdb_sb:\n    node_selector_key: openstack-network-node\n    node_selector_value: enabled\n  ovn_northd:\n    node_selector_key: openstack-network-node\n    node_selector_value: enabled\n  ovn_controller:\n    node_selector_key: openvswitch\n    node_selector_value: enabled\n  ovn_controller_gw:\n    node_selector_key: l3-agent\n    node_selector_value: enabled\n\nvolume:\n  ovn_ovsdb_nb:\n    path: /var/lib/ovn\n    
enabled: true\n    class_name: general\n    size: 5Gi\n  ovn_ovsdb_sb:\n    path: /var/lib/ovn\n    enabled: true\n    class_name: general\n    size: 5Gi\n\nnetwork:\n  interface:\n    # Tunnel interface will be used for VXLAN tunneling.\n    tunnel: null\n    # If tunnel is null there is a fallback mechanism to search\n    # for interface with routing using tunnel network cidr.\n    tunnel_network_cidr: \"0/0\"\n\nconf:\n  ovn_cms_options: \"availability-zones=nova\"\n  ovn_cms_options_gw_enabled: \"enable-chassis-as-gw,availability-zones=nova\"\n  ovn_encap_type: geneve\n  ovn_bridge: br-int\n  ovn_bridge_mappings: external:br-ex\n  ovn_monitor_all: false\n  # For DPDK enabled environments, enable netdev datapath type for br-int\n  # ovn_bridge_datapath_type: netdev\n\n  # auto_bridge_add:\n  #   br-private: eth0\n  #   br-public: eth1\n  auto_bridge_add: {}\n  ovs_user_name: openvswitch\n  ovn_network_logging_parser_uwsgi:\n    uwsgi:\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      processes: 1\n      procname-prefix-spaced: \"neutron-ovn-network-logging-parser:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      wsgi-file: /var/lib/openstack/bin/neutron-ovn-network-logging-parser-wsgi\n      stats: 0.0.0.0:1717\n      stats-http: true\n  vector: |\n    [sources.file_logs]\n    type = \"file\"\n    include = [ \"/logs/ovn-controller.log\" ]\n\n    [sinks.ovn_log_parser_in]\n    type = \"http\"\n    inputs = [\"file_logs\"]\n    uri = \"{{ tuple \"ovn_logging_parser\" \"default\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\"\n    encoding.codec = \"json\"\n    method = \"post\"\n\n    [sources.ovn_log_parser_out]\n    type = \"http_server\"\n    address = \"0.0.0.0:5001\"\n    encoding = \"json\"\n\n    [transforms.parse_log_message]\n    type = \"remap\"\n    inputs = [\"ovn_log_parser_out\"]\n    source = '''\n      del(.source_type)\n      del(.path)\n    '''\n\n    [sinks.loki_sink]\n    type = \"loki\"\n    labels.event_source = \"network_logs\"\n    inputs = [\"parse_log_message\"]\n    endpoint = \"http://loki.monitoring:3100\"\n    encoding.codec = \"json\"\n    tenant_id = \"{{`{{ project_id }}`}}\"\n\npod:\n  # NOTE: should be same as nova.pod.use_fqdn.compute\n  use_fqdn:\n    compute: true\n  envs:\n    common:\n      - name: OVN_DAEMONSET_VERSION\n        value: \"3\"\n      - name: OVN_KUBERNETES_NAMESPACE\n        valueFrom:\n          fieldRef:\n            fieldPath: metadata.namespace\n      - name: OVN_KUBERNETES_NB_STATEFULSET\n        value: ovn-ovsdb-nb\n      - name: OVN_KUBERNETES_SB_STATEFULSET\n        value: ovn-ovsdb-sb\n      - name: OVN_SSL_ENABLE\n        value: \"no\"\n    controller:\n      - name: OVN_LOGLEVEL_CONTROLLER\n        value: \"-vconsole:info -vfile:info\"\n    northd:\n      - name: OVN_LOGLEVEL_NORTHD\n        value: \"-vconsole:info -vfile:info\"\n    ovsdb_nb:\n      - name: OVN_LOGLEVEL_NB\n        value: \"-vconsole:info -vfile:info\"\n      - name: OVN_KUBERNETES_STATEFULSET\n        value: ovn-ovsdb-nb\n      - name: POD_NAME\n        valueFrom:\n          fieldRef:\n            fieldPath: metadata.name\n      - name: ENABLE_IPSEC\n        value: \"false\"\n      - name: OVN_NB_RAFT_ELECTION_TIMER\n        value: \"1000\"\n    ovsdb_sb:\n      - name: OVN_LOGLEVEL_SB\n        value: \"-vconsole:info -vfile:info\"\n      - name: OVN_KUBERNETES_STATEFULSET\n        value: ovn-ovsdb-sb\n      - name: POD_NAME\n        valueFrom:\n          fieldRef:\n            
fieldPath: metadata.name\n      - name: ENABLE_IPSEC\n        value: \"false\"\n      - name: OVN_SB_RAFT_ELECTION_TIMER\n        value: \"1000\"\n  security_context:\n    ovn_northd:\n      container:\n        northd:\n          capabilities:\n            add:\n              - SYS_NICE\n    ovn_controller:\n      container:\n        controller_init:\n          readOnlyRootFilesystem: true\n          privileged: true\n        controller:\n          readOnlyRootFilesystem: true\n          privileged: true\n        ovn_logging_parser:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        vector:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  tolerations:\n    ovn_ovsdb_nb:\n      enabled: false\n    ovn_ovsdb_sb:\n      enabled: false\n    ovn_northd:\n      enabled: false\n    ovn_controller:\n      enabled: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n\n  probes:\n    ovn_northd:\n      northd:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n            periodSeconds: 60\n    ovn_ovsdb_nb:\n      ovsdb:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n            periodSeconds: 60\n    ovn_ovsdb_sb:\n      ovsdb:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n            periodSeconds: 60\n    ovn_controller:\n      controller:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n            periodSeconds: 60\n    ovn_controller_gw:\n      controller:\n        readiness:\n          enabled: true\n  
        params:\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n            periodSeconds: 60\n  dns_policy: \"ClusterFirstWithHostNet\"\n  replicas:\n    ovn_ovsdb_nb: 1\n    ovn_ovsdb_sb: 1\n    ovn_northd: 1\n  lifecycle:\n    upgrades:\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        ovn_ovsdb_nb:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n        ovn_ovsdb_sb:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n        ovn_northd:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n        ovn_controller:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n  resources:\n    enabled: false\n    ovn_ovsdb_nb:\n      requests:\n        memory: \"384Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"1000m\"\n    ovn_ovsdb_sb:\n      requests:\n        memory: \"384Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"1000m\"\n    ovn_northd:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    ovn_controller:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    ovn_logging_parser:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"256Mi\"\n        cpu: \"500m\"\n    vector:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"256Mi\"\n        cpu: \"500m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\n  sidecars:\n    ovn_logging_parser: false\n    vector: false\n\nsecrets:\n  
oci_image_registry:\n    ovn: ovn-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      ovn:\n        username: openvswitch\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  ovn_ovsdb_nb:\n    name: ovn-ovsdb-nb\n    namespace: null\n    hosts:\n      default: ovn-ovsdb-nb\n    host_fqdn_override:\n      default: null\n    port:\n      ovsdb:\n        default: 6641\n      raft:\n        default: 6643\n  ovn_ovsdb_sb:\n    name: ovn-ovsdb-sb\n    namespace: null\n    hosts:\n      default: ovn-ovsdb-sb\n    host_fqdn_override:\n      default: null\n    port:\n      ovsdb:\n        default: 6642\n      raft:\n        default: 6644\n  ovn_logging_parser:\n    name: ovn-logging-parser\n    namespace: null\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: localhost\n    scheme:\n      default: 'http'\n      service: 'http'\n    path:\n      default: \"/logs\"\n    port:\n      api:\n        default: 9697\n        service: 9697\n\nnetwork_policy:\n  ovn_ovsdb_nb:\n    ingress:\n      - {}\n    egress:\n      - {}\n  ovn_ovsdb_sb:\n    ingress:\n      - {}\n    egress:\n      - {}\n  ovn_northd:\n    ingress:\n      - {}\n    egress:\n      - {}\n  ovn_controller:\n    ingress:\n      - {}\n    egress:\n      - {}\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - openvswitch-image-repo-sync\n        services:\n          - endpoint: node\n            service: 
local_image_registry\n  static:\n    ovn_ovsdb_nb: null\n    ovn_ovsdb_sb: null\n    ovn_northd:\n      services:\n        - endpoint: internal\n          service: ovn-ovsdb-nb\n        - endpoint: internal\n          service: ovn-ovsdb-sb\n    ovn_controller:\n      services:\n        - endpoint: internal\n          service: ovn-ovsdb-sb\n      pod:\n        - requireSameNode: true\n          labels:\n            application: openvswitch\n            component: server\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  deployment_northd: true\n  service_ovn_ovsdb_nb: true\n  service_ovn_ovsdb_sb: true\n  statefulset_ovn_ovsdb_nb: true\n  statefulset_ovn_ovsdb_sb: true\n  deployment_ovn_northd: true\n  daemonset_ovn_controller: true\n  job_image_repo_sync: true\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "placement/.helmignore",
    "content": "values_overrides\n"
  },
  {
    "path": "placement/Chart.yaml",
    "content": "# Copyright 2019 Intel Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Placement\nname: placement\nversion: 2025.2.0\nhome: https://docs.openstack.org/placement/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Placement/OpenStack_Project_Placement_vertical.png\nsources:\n  - https://opendev.org/openstack/placement\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "placement/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nplacement-manage db sync\n"
  },
  {
    "path": "placement/templates/bin/_placement-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n{{- if .Values.manifests.certificates }}\n  cp -a $(type -p placement-api) /var/www/cgi-bin/placement/\n  if [ -f /etc/apache2/envvars ]; then\n    # Loading Apache2 ENV variables\n    source /etc/apache2/envvars\n    # The directory below has to be created due to the fact that\n    # libapache2-mod-wsgi-py3 doesn't create it in contrary by libapache2-mod-wsgi\n    mkdir -p ${APACHE_RUN_DIR}\n  fi\n\n  # Get rid of stale pid file if present.\n  rm -f /var/run/apache2/*.pid\n\n  # Start Apache2\n  {{- if .Values.conf.software.apache2.a2enmod }}\n    {{- range .Values.conf.software.apache2.a2enmod }}\n  a2enmod {{ . }}\n    {{- end }}\n  {{- end }}\n  {{- if .Values.conf.software.apache2.a2dismod }}\n    {{- range .Values.conf.software.apache2.a2dismod }}\n  a2dismod {{ . }}\n    {{- end }}\n  {{- end }}\n  exec {{ .Values.conf.software.apache2.binary }} {{ .Values.conf.software.apache2.start_parameters }}\n{{- else }}\n  exec uwsgi --ini /etc/placement/placement-api-uwsgi.ini\n{{- end }}\n}\n\nfunction stop () {\n{{- if .Values.manifests.certificates }}\n  if [ -f /etc/apache2/envvars ]; then\n    source /etc/apache2/envvars\n  fi\n  {{ .Values.conf.software.apache2.binary }} -k graceful-stop\n{{- else }}\n  kill -TERM 1\n{{- end }}\n}\n\n$COMMAND\n"
  },
  {
    "path": "placement/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.certificates -}}\n{{  dict \"envAll\" . \"service\" \"placement\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "placement/templates/configmap-bin.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: placement-bin\ndata:\n  placement-api.sh: |\n{{ tuple \"bin/_placement-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/configmap-etc.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.placement.placement_database.connection)) (empty .Values.conf.placement.placement_database.connection) -}}\n{{- $connection := tuple \"oslo_db\" \"internal\" \"placement\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.placement.placement_database \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.placement.placement_database \"connection\" $connection -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.placement.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.placement.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n{{- if empty .Values.conf.placement.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.placement.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.placement.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.placement.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.placement.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.placement.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if empty (index .Values.conf.placement_api_uwsgi.uwsgi \"http-socket\") -}}\n{{- $http_socket_port := tuple \"placement\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | toString }}\n{{- $http_socket := printf \"0.0.0.0:%s\" $http_socket_port }}\n{{- $_ := set .Values.conf.placement_api_uwsgi.uwsgi \"http-socket\" $http_socket -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: placement-etc\ntype: Opaque\ndata:\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n  placement.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.placement | b64enc }}\n  placement-api-uwsgi.ini: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.placement_api_uwsgi | b64enc }}\n{{- if .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.wsgi_placement \"key\" \"wsgi-placement.conf\" \"format\" \"Secret\" ) | indent 2 }}\n{{- end }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/deployment.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment }}\n{{- $envAll := . }}\n\n{{- $mounts_placement := .Values.pod.mounts.placement.placement }}\n{{- $mounts_placement_init := .Values.pod.mounts.placement.init_container }}\n{{- $etcSources := .Values.pod.etcSources.placement }}\n{{- if eq .Values.manifests.secret_ks_etc true }}\n{{- $etcSources = append $etcSources (dict \"secret\" (dict \"name\" \"placement-ks-etc\")) }}\n{{- end }}\n\n{{- $serviceAccountName := \"placement-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: placement-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"placement\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"placement\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"placement\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | 
include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"placement\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"placement-api\" \"containerNames\" (list \"placement-api\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"placement\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"placement\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"placement\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"placement\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.placement.enabled }}\n{{ tuple $envAll \"placement\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_placement_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        {{- if .Values.conf.placement.DEFAULT.log_dir }}\n        - name: placement-api-init-log\n{{ tuple $envAll \"placement\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"placement\" \"container\" \"placement_api\" | include 
\"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - \"placement:\"\n            - {{ .Values.conf.placement.DEFAULT.log_dir }}\n          volumeMounts:\n            - name: log\n              mountPath: {{ .Values.conf.placement.DEFAULT.log_dir }}\n        {{- end }}\n      containers:\n        - name: placement-api\n{{ tuple $envAll \"placement\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"placement\" \"container\" \"placement_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n          env:\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/placement/certs/ca.crt\"\n{{- end }}\n          command:\n            - /tmp/placement-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/placement-api.sh\n                  - stop\n          ports:\n            - name: p-api\n              containerPort: {{ tuple \"placement\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            httpGet:\n              scheme: {{ tuple \"placement\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n              path: /\n              port: {{ tuple \"placement\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 5\n            periodSeconds: 10\n          livenessProbe:\n            httpGet:\n              scheme: {{ tuple \"placement\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n              path: /\n              {{- if .Values.pod.probes.placement.api.liveness.port }}\n              port: {{ .Values.pod.probes.placement.api.liveness.port }}\n              {{- else }}\n              port: {{ tuple \"placement\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              {{- end }}\n            initialDelaySeconds: 5\n            periodSeconds: 10\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.placement.oslo_concurrency.lock_path }}\n            - name: wsgi-placement\n              mountPath: /var/www/cgi-bin/placement\n            - name: placement-bin\n              mountPath: /tmp/placement-api.sh\n              subPath: placement-api.sh\n              readOnly: true\n            - name: placement-etc\n              mountPath: /etc/placement/placement.conf\n              subPath: placement.conf\n              readOnly: true\n            - name: placement-etc-snippets\n              mountPath: /etc/placement/placement.conf.d/\n              readOnly: true\n            - name: placement-etc\n              mountPath: /etc/placement/placement-api-uwsgi.ini\n              subPath: placement-api-uwsgi.ini\n              readOnly: true\n            {{- if .Values.conf.placement.DEFAULT.log_config_append }}\n            - name: placement-etc\n              mountPath: {{ .Values.conf.placement.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.placement.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: placement-etc\n              mountPath: /etc/placement/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n            - name: placement-etc\n              mountPath: 
/etc/apache2/conf-enabled/wsgi-placement.conf\n              subPath: wsgi-placement.conf\n              readOnly: true\n            {{- if .Values.conf.placement.DEFAULT.log_dir }}\n            - name: log\n              mountPath: {{ .Values.conf.placement.DEFAULT.log_dir }}\n            {{- end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.placement.api.internal \"path\" \"/etc/placement/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_placement.volumeMounts }}{{ toYaml $mounts_placement.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: wsgi-placement\n          emptyDir: {}\n        - name: placement-bin\n          configMap:\n            name: placement-bin\n            defaultMode: 0555\n        - name: placement-etc\n          secret:\n            secretName: placement-etc\n            defaultMode: 0444\n        - name: placement-etc-snippets\n{{- if $etcSources }}\n          projected:\n            sources:\n{{ toYaml $etcSources | indent 14 }}\n{{- else }}\n          emptyDir: {}\n{{ end }}\n        {{- if .Values.conf.placement.DEFAULT.log_dir }}\n        - name: log\n          hostPath:\n            path: {{ .Values.conf.placement.DEFAULT.log_dir }}\n            type: DirectoryOrCreate\n        {{- end }}\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" 
.Values.secrets.tls.placement.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_placement.volumes }}{{ toYaml $mounts_placement.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "placement/templates/ingress.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress .Values.network.api.ingress.public }}\n{{- $envAll := . -}}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendServiceType\" \"placement\" \"backendPort\" \"p-api\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.placement.api.internal -}}\n{{- if and .Values.manifests.certificates $secretName -}}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.placement.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/job-db-drop.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $serviceName := \"placement\" -}}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" $serviceName -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbDropJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.placement.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/job-db-init.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $serviceName := \"placement\" -}}\n{{- $dbApi := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"placement_database\" \"configDbKey\" \"connection\" -}}\n{{- $dbsToInit := list $dbApi }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" $serviceName \"dbsToInit\" $dbsToInit -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.placement.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/job-db-sync.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"placement\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbSyncJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.placement.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"placement\" -}}\n{{- $_ := set $imageRepoSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.placement.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"placement\" \"serviceTypes\" ( tuple \"placement\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.placement.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.placement.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/job-ks-service.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"placement\" \"serviceTypes\" ( tuple \"placement\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.placement.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.placement.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/job-ks-user.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"placement\" -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.placement.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.placement.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/network_policy.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"placement\" }}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "placement/templates/pdb.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: placement-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"placement\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/secret-db.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"placement\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection :=  tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{ include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"placement\" ) }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/secret-keystone.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"placement\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/secret-ks-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $envAll := . -}}\n{{/* the endpoints.identity.auth sections with the oslo conf sections they get rendered to */}}\n{{- $ksUsers := dict\n  \"placement\" \"keystone_authtoken\"\n-}}\n{{ dict\n  \"envAll\" $envAll\n  \"serviceName\" \"placement\"\n  \"serviceUserSections\" $ksUsers\n  | include \"helm-toolkit.manifests.secret_ks_etc\"\n}}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/service-ingress.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"placement\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "placement/templates/service.yaml",
    "content": "{{/*\nCopyright 2019 Intel Corporation.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"placement\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: p-api\n    port: {{ tuple \"placement\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n    nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"placement\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "placement/values.yaml",
    "content": "# Copyright 2019 Intel Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for openstack-placement.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nrelease_group: null\n\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    placement: quay.io/airshipit/placement:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    placement_db_sync: quay.io/airshipit/placement:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nnetwork:\n  api:\n    port: 8778\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: 
/\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30778\n\nconf:\n  policy: {}\n  placement:\n    DEFAULT:\n      debug: false\n      use_syslog: false\n      log_config_append: /etc/placement/logging.conf\n    placement_database:\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    keystone_authtoken:\n      service_token_roles: service\n      service_token_roles_required: true\n      auth_version: v3\n      auth_type: password\n      memcache_security_strategy: ENCRYPT\n      service_type: placement\n    oslo_concurrency:\n      lock_path: /var/lock\n  logging:\n    loggers:\n      keys:\n        - root\n        - placement\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_placement:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: placement\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: 
(sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n  placement_api_uwsgi:\n    uwsgi:\n      processes: 1\n      add-header: \"Connection: close\"\n      buffer-size: 65535\n      die-on-term: true\n      enable-threads: true\n      exit-on-reload: false\n      hook-master-start: unix_signal:15 gracefully_kill_them_all\n      lazy-apps: true\n      log-x-forwarded-for: true\n      master: true\n      procname-prefix-spaced: \"placement-api:\"\n      route-user-agent: '^kube-probe.* donotlog:'\n      thunder-lock: true\n      worker-reload-mercy: 80\n      wsgi-file: /var/lib/openstack/bin/placement-api\n      stats: 0.0.0.0:1717\n      stats-http: true\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      placement:\n        username: placement\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      placement:\n        username: placement\n        password: password\n      # NOTE: This should be the username/password used to access the nova_api\n      # database. 
This is required only if database migration from nova to\n      # placement is desired.\n      nova_api:\n        username: nova\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /placement\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      placement:\n        role: admin\n        region_name: RegionOne\n        username: placement\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  placement:\n    name: placement\n    hosts:\n      default: placement-api\n      public: placement\n    host_fqdn_override:\n      default: null\n    path:\n      default: /\n    scheme:\n      default: 'http'\n      service: 'http'\n    port:\n      api:\n        default: 8778\n        public: 80\n        service: 8778\n\npod:\n  security_context:\n    placement:\n      pod:\n        runAsUser: 42424\n      container:\n   
     placement_api:\n          readOnlyRootFilesystem: false\n          runAsUser: 0\n        placement_mysql_migration:\n          readOnlyRootFilesystem: false\n          runAsUser: 0\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n  tolerations:\n    placement:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  mounts:\n    placement:\n      init_container: null\n      placement:\n        volumeMounts:\n        volumes:\n    placement_db_sync:\n      init_container: null\n      placement_db_sync:\n        volumeMounts:\n        volumes:\n  # -- This allows users to add Kubernetes Projected Volumes to be mounted at /etc/placement/placement.conf.d/\n  ## This is a list of projected volume source objects for each deployment/statefulset/daemonset/cronjob\n  ## https://kubernetes.io/docs/concepts/storage/projected-volumes/\n  etcSources:\n    placement: []\n    placement_db_sync: []\n  replicas:\n    api: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: 
\"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  probes:\n    placement:\n      api:\n        liveness:\n          port: 1717\n\nsecrets:\n  identity:\n    admin: placement-keystone-admin\n    placement: placement-keystone-user\n  oslo_db:\n    admin: placement-db-admin\n    placement: placement-db-user\n  tls:\n    placement:\n      api:\n        public: placement-tls-public\n        internal: placement-tls-api\n  oci_image_registry:\n    placement: placement-oci-image-registry\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - placement-db-sync\n        - placement-ks-service\n        - placement-ks-user\n        - placement-ks-endpoints\n    ks_endpoints:\n      jobs:\n        - placement-ks-user\n        - placement-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n     
 services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - placement-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n\ntls:\n  identity: false\n  oslo_messaging: false\n  oslo_db: false\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  deployment: true\n  job_image_repo_sync: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  network_policy: false\n  secret_db: true\n  secret_ingress_tls: true\n  secret_registry: true\n  pdb: true\n  ingress: true\n  secret_keystone: true\n  secret_ks_etc: true\n  service_ingress: true\n  service: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "playbooks/build-chart.yaml",
    "content": "---\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n- hosts: all\n  roles:\n    - ensure-helm\n    - ensure-chart-testing\n\n  tasks:\n    - name: Install reno\n      pip:\n        name: reno>=4.1.0\n        virtualenv: \"{{ virtualenv }}\"\n        virtualenv_command: python3 -m venv\n\n    - name: Get list of changed charts\n      shell: \"ct list-changed --target-branch master --since {{ zuul.oldrev | default('HEAD~1') }} --chart-dirs . 2>/dev/null\"\n      args:\n        chdir: \"{{ zuul.project.src_dir }}\"\n      register: changed_charts_output\n      changed_when: false\n\n    - name: Parse changed charts\n      set_fact:\n        changed_charts: \"{{ changed_charts_output.stdout_lines }}\"\n\n    - name: Display changed charts\n      debug:\n        msg: \"Changed charts: {{ changed_charts }}\"\n\n    - name: Build each changed chart\n      make:\n        chdir: \"{{ zuul.project.src_dir }}\"\n        target: \"{{ item }}\"\n        params:\n          PYTHON: \"{{ virtualenv }}/bin/python\"\n          BASE_VERSION: \"{{ base_version }}\"\n      loop: \"{{ changed_charts }}\"\n      when: changed_charts | length > 0\n\n    - name: Move chart packages to subdirectories\n      shell: |\n        mkdir -p {{ zuul.project.src_dir }}/{{ item }}\n        mv {{ zuul.project.src_dir }}/{{ item }}-*.tgz {{ zuul.project.src_dir }}/{{ item }}/\n      loop: \"{{ changed_charts }}\"\n      when: changed_charts | length > 0\n...\n"
  },
  {
    "path": "playbooks/collect-logs.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- hosts: all\n  vars:\n    work_dir: \"{{ zuul.project.src_dir }}\"\n    logs_dir: \"/tmp/logs\"\n  roles:\n    - gather-host-logs\n\n- hosts: primary\n  vars:\n    work_dir: \"{{ zuul.project.src_dir }}\"\n    logs_dir: \"/tmp/logs\"\n  roles:\n    - helm-release-status\n    - describe-kubernetes-objects\n    - gather-pod-logs\n    - gather-prom-metrics\n    - gather-selenium-data\n...\n"
  },
  {
    "path": "playbooks/deploy-env-kubespray.yaml",
    "content": "---\n- hosts: all\n  become: true\n  gather_facts: true\n  tasks:\n    - name: Install prerequisites\n      include_role:\n        name: deploy-env\n        tasks_from: prerequisites\n\n    - name: Create loopback devices\n      include_role:\n        name: deploy-env\n        tasks_from: loopback_devices\n      when:\n        - loopback_setup\n        - inventory_hostname in (groups['k8s_cluster'] | default([]))\n\n- hosts: primary\n  become: false\n  gather_facts: true\n  vars:\n    home_dir: /home/zuul\n    ansible_user: zuul\n  tasks:\n    - name: Clone Kubespray repo\n      shell: |\n        set -x\n        git clone https://github.com/kubernetes-sigs/kubespray.git\n        cd kubespray\n        git checkout -b release-2.25\n        git reset --hard v2.25.0\n      args:\n        chdir: \"{{ home_dir }}\"\n\n    - name: Install Kubespray Python dependencies\n      become: true\n      pip:\n        chdir: \"{{ home_dir }}/kubespray\"\n        requirements: requirements.txt\n\n    - name: Prepare Kubespray inventory (not Zuul job inventory)\n      shell: |\n        #!/bin/bash\n        set -x\n        python3 contrib/inventory_builder/inventory.py {{ groups['k8s_cluster'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | join(' ') }}\n      args:\n        chdir: \"{{ home_dir }}/kubespray\"\n      environment:\n        KUBE_MASTERS: \"1\"\n\n    - name: Prepare Kubespray variables\n      shell: |\n        #!/bin/bash\n        set -x\n        tee inventory/sample/group_vars/all/xxx.yaml <<EOF\n        ansible_user: {{ ansible_user }}\n        ansible_ssh_private_key_file: {{ home_dir }}/.ssh/id_rsa\n        ansible_ssh_extra_args: -o StrictHostKeyChecking=no\n        upstream_dns_servers:\n          - 8.8.8.8\n        override_system_hostname: false\n        EOF\n\n        tee inventory/sample/group_vars/k8s_cluster/xxx.yaml << EOF\n        kube_version: \"{{ kube_version_kubespray }}\"\n        kube_network_plugin: flannel\n        
kube_service_addresses: \"{{ kubeadm.service_cidr }}\"\n        kube_pods_subnet: \"{{ kubeadm.pod_network_cidr }}\"\n\n        kubeconfig_localhost: true\n        kubeconfig_localhost_ansible_host: true\n\n        enable_nodelocaldns: false\n        resolvconf_mode: none\n\n        kube_override_hostname: >-\n          {% raw %}{{ ansible_hostname }}{% endraw %}\n\n        EOF\n      args:\n        chdir: \"{{ home_dir }}/kubespray\"\n\n    - name: Deploy Kubernetes\n      shell: |\n        #!/bin/bash\n        set -x\n        ansible-playbook -i inventory/sample/hosts.yaml --become --become-user=root cluster.yml\n      args:\n        chdir: /home/zuul/kubespray\n\n    - name: Copy kubectl config to localhost (will be used in the following tasks)\n      synchronize:\n        mode: pull\n        src: /home/zuul/kubespray/inventory/sample/artifacts/admin.conf\n        dest: /tmp/kube_config\n\n- hosts: primary\n  become: true\n  gather_facts: true\n  tasks:\n    - name: Install Docker\n      include_role:\n        name: deploy-env\n        tasks_from: containerd\n\n    - name: Install and configure Kubectl and Helm\n      include_role:\n        name: deploy-env\n        tasks_from: k8s_client\n\n    - name: Deploy Metallb on K8s\n      include_role:\n        name: deploy-env\n        tasks_from: metallb\n\n    - name: Create Openstack Metallb endpoint\n      include_role:\n        name: deploy-env\n        tasks_from: openstack_metallb_endpoint\n\n- hosts: all\n  become: true\n  gather_facts: true\n  tasks:\n    - name: Create client-to-cluster wireguard tunnel\n      include_role:\n        name: deploy-env\n        tasks_from: client_cluster_tunnel\n\n    - name: Install Docker\n      include_role:\n        name: deploy-env\n        tasks_from: containerd\n      when:\n        - openstack_provider_gateway_setup\n        - inventory_hostname in (groups['k8s_control_plane'] | default([]))\n\n    - name: Deploy Openstack provider gateway\n      include_role:\n        
name: deploy-env\n        tasks_from: openstack_provider_gateway\n      when:\n        - openstack_provider_gateway_setup\n        - inventory_hostname in (groups['k8s_control_plane'] | default([]))\n...\n"
  },
  {
    "path": "playbooks/deploy-env.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n---\n- hosts: all\n  strategy: linear\n  become: true\n  gather_facts: true\n  roles:\n    - ensure-python\n    - ensure-pip\n    - clear-firewall\n    - deploy-apparmor\n    - deploy-selenium\n    - deploy-env\n...\n"
  },
  {
    "path": "playbooks/enable-hugepages.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- hosts: all\n  gather_facts: True\n  become: yes\n  roles:\n    - role: enable-hugepages\n      when: hugepages.enabled|default(false)|bool == true\n...\n"
  },
  {
    "path": "playbooks/inject-keys.yaml",
    "content": "---\n- hosts: all\n  tasks:\n    - name: Put keys to .ssh/authorized_keys\n      lineinfile:\n        path: /home/zuul/.ssh/authorized_keys\n        state: present\n        line: \"{{ item }}\"\n      loop:\n        - \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN9wbA25JdmVAKqiO78/1P97r4ctR1tH3MLelByCj8wC vlad@russell\"\n...\n"
  },
  {
    "path": "playbooks/lint.yaml",
    "content": "---\n# Copyright 2018 SUSE LINUX GmbH.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n- hosts: all\n  roles:\n    - ensure-python\n    - ensure-pip\n    - ensure-helm\n    - ensure-chart-testing\n    - name: chart-testing\n      chart_testing_options: \"--target-branch=master --chart-dirs=. --validate-maintainers=false --check-version-increment=false\"\n      zuul_work_dir: \"{{ work_dir }}\"\n  vars:\n    work_dir: \"{{ zuul.project.src_dir }}\"\n\n  tasks:\n    - name: Install yamllint\n      pip:\n        name:\n          - yq\n          - yamllint\n        virtualenv: \"{{ virtualenv }}\"\n        virtualenv_command: python3 -m venv\n\n    - name: Run yamllint\n      shell: |\n        cat > /tmp/yamllint.sh <<EOF\n        #!/bin/bash\n        set -xe\n        source \"{{ virtualenv }}/bin/activate\"\n        pip freeze\n        rm -rf */charts/helm-toolkit\n        mkdir .yamllint\n        cp -r * .yamllint\n        rm -rf .yamllint/roles\n        rm -rf .yamllint/*/templates\n\n        for i in */; do\n            # avoid helm-toolkit to symlink on itself\n            [ -d \"\\$i/templates\" -a \"\\$i\" != \"helm-toolkit/\" ] || continue\n            mkdir -p \\$i/charts\n            ln -s ../../helm-toolkit \\$i/charts/helm-toolkit\n            helm template \\$i --output-dir .yamllint 2>&1 > /dev/null\n        done\n        find .yamllint -type f -exec sed -i 's/%%%.*/XXX/g' {} +\n\n        shopt -s globstar extglob\n        # Lint 
all yaml files except templates\n        yamllint -c yamllint.conf .yamllint/*{,/!(templates)/**}/*.y*ml yamllint*.conf\n        # Lint templates\n        yamllint -c yamllint-templates.conf .yamllint/*/templates/*.yaml\n        EOF\n        chmod +x /tmp/yamllint.sh\n        /tmp/yamllint.sh\n      args:\n        chdir: \"{{ work_dir }}\"\n        executable: /bin/bash\n...\n"
  },
  {
    "path": "playbooks/mount-volumes.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- hosts: all\n  roles:\n    - mount-extra-volume\n...\n"
  },
  {
    "path": "playbooks/osh-bandit.yaml",
    "content": "---\n- hosts: primary\n  roles:\n    - ensure-python\n    - ensure-pip\n    - osh-bandit\n...\n"
  },
  {
    "path": "playbooks/prepare-hosts.yaml",
    "content": "---\n- hosts: all\n  roles:\n    - ensure-python\n    - ensure-pip\n    - clear-firewall\n...\n"
  },
  {
    "path": "playbooks/publish/post.yaml",
    "content": "---\n# Copyright 2020 VEXXHOST, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n- hosts: all\n  tasks:\n    - name: Get list of changed charts\n      shell: \"ct list-changed --target-branch master --since {{ zuul.oldrev | default('HEAD~1') }} --chart-dirs . 2>/dev/null\"\n      args:\n        chdir: \"{{ zuul.project.src_dir }}\"\n      register: changed_charts_output\n      changed_when: false\n\n    - name: Parse changed charts\n      set_fact:\n        changed_charts: \"{{ changed_charts_output.stdout_lines }}\"\n\n    - name: Display changed charts\n      debug:\n        msg: \"Changed charts to publish: {{ changed_charts }}\"\n\n    - name: Download current index\n      register: _get_url\n      failed_when: _get_url.status_code not in (200, 404)\n      get_url:\n        url: \"https://tarballs.opendev.org/{{ zuul.project.name }}/index.yaml\"\n        dest: \"{{ zuul.project.src_dir }}/index.yaml\"\n\n    - name: Create a new index\n      when: _get_url.status_code == 404\n      shell: helm repo index {{ zuul.project.src_dir }} --url https://tarballs.opendev.org/{{ zuul.project.name }}\n\n    - name: Merge into existing index\n      when: _get_url.status_code == 200\n      shell: helm repo index {{ zuul.project.src_dir }} --merge {{ zuul.project.src_dir }}/index.yaml --url https://tarballs.opendev.org/{{ zuul.project.name }}\n\n    - name: Cat updated index\n      shell: cat {{ zuul.project.src_dir }}/index.yaml\n      register: 
index_content\n      changed_when: false\n\n    - name: Display updated index\n      debug:\n        msg: \"{{ index_content.stdout }}\"\n\n    - name: Ensure artifact directory exists\n      file:\n        path: \"{{ zuul.executor.work_root }}/artifacts/\"\n        state: directory\n      delegate_to: localhost\n\n    - name: Ensure chart subdirectories exist in artifacts\n      file:\n        path: \"{{ zuul.executor.work_root }}/artifacts/{{ item }}\"\n        state: directory\n      delegate_to: localhost\n      loop: \"{{ changed_charts }}\"\n      when: changed_charts | length > 0\n\n    - name: Gather packaged charts from changed chart directories\n      find:\n        file_type: file\n        paths: \"{{ zuul.project.src_dir }}\"\n        patterns: \"{{ item }}-*.tgz\"\n        recurse: true\n      register: chart_packages\n      loop: \"{{ changed_charts }}\"\n      when: changed_charts | length > 0\n\n    - name: Display chart tarballs to be published\n      debug:\n        msg: \"src: {{ item.1.path }} dest: {{ zuul.executor.work_root }}/artifacts/{{ item.0.item }}/\"\n      loop: \"{{ chart_packages.results | subelements('files', skip_missing=True) }}\"\n      when: changed_charts | length > 0\n\n    - name: Copy packaged charts to artifacts preserving directory structure\n      synchronize:\n        mode: pull\n        src: \"{{ item.1.path }}\"\n        dest: \"{{ zuul.executor.work_root }}/artifacts/{{ item.0.item }}/\"\n        verify_host: true\n        owner: no\n        group: no\n      loop: \"{{ chart_packages.results | subelements('files', skip_missing=True) }}\"\n      when: changed_charts | length > 0\n\n    - name: Copy index.yaml to artifacts\n      synchronize:\n        mode: pull\n        src: \"{{ zuul.project.src_dir }}/index.yaml\"\n        dest: \"{{ zuul.executor.work_root }}/artifacts/\"\n        verify_host: true\n        owner: no\n        group: no\n...\n"
  },
  {
    "path": "playbooks/publish/run.yaml",
    "content": "---\n# Copyright 2020 VEXXHOST, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n- hosts: all\n  roles:\n    - name: build-helm-packages\n      work_dir: \"{{ zuul.project.src_dir }}\"\n...\n"
  },
  {
    "path": "playbooks/run-scripts.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- hosts: all\n  become: true\n  tasks:\n    - name: Configure /etc/hosts for buildset_registry to workaround docker not understanding ipv6 addresses\n      lineinfile:\n        path: /etc/hosts\n        state: present\n        regex: \"^{{ buildset_registry.host }}\\tzuul-jobs.buildset-registry$\"\n        line: \"{{ buildset_registry.host }}\\tzuul-jobs.buildset-registry\"\n        insertafter: EOF\n      when:\n        - buildset_registry is defined\n        - buildset_registry.host | ipaddr\n\n- hosts: primary\n  tasks:\n    - name: Override images\n      when: buildset_registry is defined\n      vars:\n        work_dir: \"{{ zuul.project.src_dir }}\"\n      block:\n        - name: Set buildset_registry alias variable when using ip\n          set_fact:\n            buildset_registry_alias: zuul-jobs.buildset-registry\n          when:\n            - buildset_registry.host | ipaddr\n\n        - name: Set buildset_registry alias variable when using name\n          set_fact:\n            buildset_registry_alias: \"{{ buildset_registry.host }}\"\n          when:\n            - not ( buildset_registry.host | ipaddr )\n\n        - name: Print zuul\n          debug:\n            var: zuul\n\n        - name: Override proposed images from artifacts\n          shell: >\n            set -ex;\n            find {{ override_paths | join(\" \") }} -type f -exec sed -Ei\n            
\"s#['\\\"]?(docker|quay)\\.io/(openstackhelm|loci|airshipit)/({{ repo }}):({{ tag }})['\\\"]?\\$#{{ buildset_registry_alias }}:{{ buildset_registry.port }}/{{ repo_org }}/{{ repo }}:\\4#g\" {} +\n          loop: \"{{ zuul.artifacts | default([]) }}\"\n          args:\n            chdir: \"{{ work_dir }}\"\n          loop_control:\n            loop_var: zj_zuul_artifact\n          when: \"'metadata' in zj_zuul_artifact and zj_zuul_artifact.metadata.type | default('') == 'container_image'\"\n          vars:\n            tag: \"{{ zj_zuul_artifact.metadata.tag }}\"\n            repo_org: \"{{ zj_zuul_artifact.metadata.repository | dirname }}\"\n            repo: \"{{ zj_zuul_artifact.metadata.repository | basename }}\"\n            override_paths:\n              - ../openstack-helm/values_overrides\n              - ../openstack-helm/*/values*\n              - ../openstack-helm/tools/deployment/\n\n        - name: Diff\n          shell: |\n              set -ex\n              cd \"{{ work_dir }}/../openstack-helm\"\n              git diff\n\n    - name: \"creating directory for run artifacts\"\n      file:\n        path: \"/tmp/artifacts\"\n        state: directory\n\n    # NOTE: After switching to Ansible 2.11 then xtrace stopped working\n    #       for shell tasks in included roles. 
So instead of using\n    #       osh-run-script-* roles we directly run the scripts in the\n    #       playbook.\n    - name: \"Run script {{ item }}\"\n      shell: |\n        set -xe\n        env\n        {{ item }}\n      args:\n        chdir: \"{{ zuul.project.src_dir }}/{{ gate_scripts_relative_path }}\"\n        executable: /bin/bash\n      environment:\n        CEPH_OSD_DATA_DEVICE: \"{{ ceph_osd_data_device }}\"\n        POD_NETWORK_CIDR: \"{{ kubeadm.pod_network_cidr }}\"\n        zuul_site_mirror_fqdn: \"{{ zuul_site_mirror_fqdn }}\"\n        OSH_EXTRA_HELM_ARGS: \"{{ zuul_osh_extra_helm_args | default('') }}\"\n        OSH_HELM_REPO: \"{{ osh_helm_repo | default('../openstack-helm') }}\"\n        DOWNLOAD_OVERRIDES: \"{{ download_overrides | default('') }}\"\n        OSH_PATH: \"{{ zuul_osh_relative_path | default('../openstack-helm/') }}\"\n        OSH_VALUES_OVERRIDES_PATH: \"{{ osh_values_overrides_path }}\"\n        OPENSTACK_RELEASE: \"{{ osh_params.openstack_release | default('') }}\"\n        CONTAINER_DISTRO_NAME: \"{{ osh_params.container_distro_name | default('') }}\"\n        CONTAINER_DISTRO_VERSION: \"{{ osh_params.container_distro_version | default('') }}\"\n        FEATURES: \"{{ osh_params.feature_gates | default('') | regex_replace(',', ' ')  }} {{ osh_params.openstack_release | default('') }} {{ osh_params.container_distro_name | default('') }}_{{ osh_params.container_distro_version | default('') }} {{ osh_params.container_distro_name | default('') }}\"\n        RUN_HELM_TESTS: \"{{ run_helm_tests | default('yes') }}\"\n      loop: \"{{ gate_scripts }}\"\n\n    - name: \"Downloads artifacts to executor\"\n      synchronize:\n        src: \"/tmp/artifacts\"\n        dest: \"{{ zuul.executor.log_root }}/{{ inventory_hostname }}\"\n        mode: pull\n      ignore_errors: True\n...\n"
  },
  {
    "path": "postgresql/.helmignore",
    "content": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation (prefixed with !). Only one pattern per line.\n.DS_Store\n# Common VCS dirs\n.git/\n.gitignore\n.bzr/\n.bzrignore\n.hg/\n.hgignore\n.svn/\n# Common backup files\n*.swp\n*.bak\n*.tmp\n*~\n# Various IDEs\n.project\n.idea/\n*.tmproj\n"
  },
  {
    "path": "postgresql/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v14.5\ndescription: OpenStack-Helm PostgreSQL\nname: postgresql\nversion: 2025.2.0\nhome: https://www.postgresql.org\nsources:\n  - https://github.com/postgres/postgres\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "postgresql/templates/bin/_backup_postgresql.sh.tpl",
    "content": "#!/bin/bash\n\nSCOPE=${1:-\"all\"}\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n# This is needed to get the postgresql admin password\n# Turn off tracing so the password doesn't get printed.\nset +x\nexport PGPASSWORD=$(cat /etc/postgresql/admin_user.conf \\\n                    | grep postgres | awk -F: '{print $5}')\n\n# Note: not using set -e in this script because more elaborate error handling\n# is needed.\n\nsource /tmp/backup_main.sh\n\n# Export the variables required by the framework\n#  Note: REMOTE_BACKUP_ENABLED and CONTAINER_NAME are already exported\nexport DB_NAMESPACE=${POSTGRESQL_POD_NAMESPACE}\nexport DB_NAME=\"postgres\"\nexport LOCAL_DAYS_TO_KEEP=$POSTGRESQL_LOCAL_BACKUP_DAYS_TO_KEEP\nexport REMOTE_DAYS_TO_KEEP=$POSTGRESQL_REMOTE_BACKUP_DAYS_TO_KEEP\nexport REMOTE_BACKUP_RETRIES=${NUMBER_OF_RETRIES_SEND_BACKUP_TO_REMOTE}\nexport MIN_DELAY_SEND_REMOTE=${MIN_DELAY_SEND_BACKUP_TO_REMOTE}\nexport MAX_DELAY_SEND_REMOTE=${MAX_DELAY_SEND_BACKUP_TO_REMOTE}\nexport ARCHIVE_DIR=${POSTGRESQL_BACKUP_BASE_DIR}/db/${DB_NAMESPACE}/${DB_NAME}/archive\n\n# This function dumps all database files to the $TMP_DIR that is being\n# used as a staging area for preparing the backup tarball. 
Log file to\n# write to is passed in - the framework will expect that file to have any\n# errors that occur if the database dump is unsuccessful, so that it can\n# add the file contents to its own logs.\ndump_databases_to_directory() {\n  TMP_DIR=$1\n  LOG_FILE=$2\n  SCOPE=${3:-\"all\"}\n\n  PG_DUMP_OPTIONS=$(echo $POSTGRESQL_BACKUP_PG_DUMPALL_OPTIONS | sed 's/\"//g')\n  PG_DUMP=\"pg_dump \\\n             $PG_DUMP_OPTIONS --create \\\n             -U $POSTGRESQL_ADMIN_USER \\\n             -h $POSTGRESQL_SERVICE_HOST\"\n  PG_DUMPALL=\"pg_dumpall \\\n                $PG_DUMP_OPTIONS \\\n                -U $POSTGRESQL_ADMIN_USER \\\n                -h $POSTGRESQL_SERVICE_HOST\"\n\n  SQL_FILE=postgres.${POSTGRESQL_POD_NAMESPACE}.${SCOPE}\n\n  cd $TMP_DIR\n\n  if [[ \"${SCOPE}\" == \"all\" ]]; then\n    # Dump all databases\n    ${PG_DUMPALL} --file=${TMP_DIR}/${SQL_FILE}.sql 2>>${LOG_FILE}\n  else\n    if [[ \"${SCOPE}\" != \"postgres\" && \"${SCOPE}\" != \"template0\" && \"${SCOPE}\" != \"template1\" ]]; then\n      # Dump the specified database\n      ${PG_DUMP} --file=${TMP_DIR}/${SQL_FILE}.sql ${SCOPE} 2>>${LOG_FILE}\n    else\n      log ERROR \"It is not allowed to backup up the ${SCOPE} database.\"\n      return 1\n    fi\n  fi\n\n  if [[ $? -eq 0 && -s \"${TMP_DIR}/${SQL_FILE}.sql\" ]]; then\n    log INFO postgresql_backup \"Database(s) dumped successfully. (SCOPE = ${SCOPE})\"\n    return 0\n  else\n    log ERROR \"Backup of the postgresql database(s) failed and needs attention. (SCOPE = ${SCOPE})\"\n    return 1\n  fi\n}\n\n# Verify all the databases backup archives\nverify_databases_backup_archives() {\n  ####################################\n  # TODO: add implementation of local backup verification\n  ####################################\n  return 0\n}\n\n# Call main program to start the database backup\nbackup_databases ${SCOPE}\n"
  },
  {
    "path": "postgresql/templates/bin/_common_backup_restore.sh.tpl",
    "content": "#!/bin/bash\n\n# Copyright 2018 The Openstack-Helm Authors.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n# Do not use set -x here because the manual backup or restore pods may be using\n# these functions, and it will distort the command output to have tracing on.\n\nlog_backup_error_exit() {\n  MSG=$1\n  ERRCODE=$2\n  log ERROR postgresql_backup \"${MSG}\"\n  exit $ERRCODE\n}\n\nlog() {\n  #Log message to a file or stdout\n  #TODO: This can be convert into mail alert of alert send to a monitoring system\n  #Params: $1 log level\n  #Params: $2 service\n  #Params: $3 message\n  #Params: $4 Destination\n  LEVEL=$1\n  SERVICE=$2\n  MSG=$3\n  DEST=$4\n  DATE=$(date +\"%m-%d-%y %H:%M:%S\")\n  if [ -z \"$DEST\" ]\n  then\n    echo \"${DATE} ${LEVEL}: $(hostname) ${SERVICE}: ${MSG}\"\n  else\n    echo \"${DATE} ${LEVEL}: $(hostname) ${SERVICE}: ${MSG}\" >>$DEST\n  fi\n}\n\n#Get the day delta since the archive file backup\nseconds_difference() {\n  archive_date=$( date --date=\"$1\" +%s )\n  if [ \"$?\" -ne 0 ]\n  then\n    second_delta=0\n  fi\n  current_date=$( date +%s )\n  second_delta=$(($current_date-$archive_date))\n  if [ \"$second_delta\" -lt 0 ]\n  then\n    second_delta=0\n  fi\n  echo $second_delta\n}\n\n# Wait for a file to be available on the file system (written by the other\n# container).\nwait_for_file() {\n  WAIT_FILE=$1\n  NO_TIMEOUT=${2:-false}\n  TIMEOUT=300\n  if [[ $NO_TIMEOUT == \"true\" ]]\n  
then\n    # Such a large value to virtually never timeout\n    TIMEOUT=999999999\n  fi\n  TIMEOUT_EXP=$(( $(date +%s) + $TIMEOUT ))\n  DONE=false\n  while [[ $DONE == \"false\" ]]\n  do\n    DELTA=$(( TIMEOUT_EXP - $(date +%s) ))\n    if [[ \"$(ls -l ${WAIT_FILE} 2>/dev/null | wc -l)\" -gt 0 ]];\n    then\n      DONE=true\n    elif [[ $DELTA -lt 0 ]]\n    then\n      DONE=true\n      echo \"Timed out waiting for file ${WAIT_FILE}.\"\n      return 1\n    else\n      echo \"Still waiting ...will time out in ${DELTA} seconds...\"\n      sleep 5\n    fi\n  done\n  return 0\n}\n\n"
  },
  {
    "path": "postgresql/templates/bin/_db_test.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\ntrap cleanup EXIT SIGTERM SIGINT SIGKILL\n\nTEST_DATABASE_NAME=\"pg_helmtest_db\"\nTEST_DATABASE_USER=\"pg_helmtest_user\"\nTEST_DATABASE_PASSWORD=$RANDOM\nTEST_TABLE_NAME=\"pg_helmtest\"\n\nfunction psql_cmd {\n  DATABASE=$1\n  DB_USER=$2\n  export PGPASSWORD=$3\n  DB_COMMAND=$4\n  EXIT_ON_FAIL=${5:-1}\n\n  psql \\\n  -h $DB_FQDN \\\n  -p $DB_PORT \\\n  -U $DB_USER \\\n  -d $DATABASE \\\n  -v \"ON_ERROR_STOP=1\" \\\n  --command=\"${DB_COMMAND}\"\n\n  RC=$?\n\n  if [[ $RC -ne 0 ]]\n  then\n    echo 'FAIL!'\n    if [[ $EXIT_ON_FAIL -eq 1 ]]\n    then\n      exit $RC\n    fi\n  fi\n\n  return 0\n}\n\nfunction cleanup {\n  echo 'Cleaning up the database...'\n  psql_cmd \"postgres\" ${DB_ADMIN_USER} ${ADMIN_PASSWORD} \"DROP DATABASE IF EXISTS ${TEST_DATABASE_NAME};\" 0\n  psql_cmd \"postgres\" ${DB_ADMIN_USER} ${ADMIN_PASSWORD} \"DROP ROLE IF EXISTS ${TEST_DATABASE_USER};\" 0\n  echo 'Cleanup Finished.'\n}\n\n# Create db\necho 'Testing database connectivity as admin user...'\npsql_cmd \"postgres\" ${DB_ADMIN_USER} ${ADMIN_PASSWORD} \"SELECT 1 FROM pg_database;\"\necho 'Connectivity Test SUCCESS!'\n\necho 'Testing creation of an application database...'\npsql_cmd \"postgres\" ${DB_ADMIN_USER} ${ADMIN_PASSWORD} \"CREATE DATABASE ${TEST_DATABASE_NAME};\"\necho 'Database Creation Test SUCCESS!'\n\necho 'Testing creation of an application user...'\npsql_cmd \"postgres\" ${DB_ADMIN_USER} 
${ADMIN_PASSWORD} \"CREATE ROLE ${TEST_DATABASE_USER} LOGIN PASSWORD '${TEST_DATABASE_PASSWORD}';\"\npsql_cmd \"postgres\" ${DB_ADMIN_USER} ${ADMIN_PASSWORD} \"GRANT ALL PRIVILEGES ON DATABASE ${TEST_DATABASE_NAME} to ${TEST_DATABASE_USER};\"\necho 'User Creation SUCCESS!'\n\necho 'Testing creation of an application table...'\npsql_cmd ${TEST_DATABASE_NAME} ${TEST_DATABASE_USER} ${TEST_DATABASE_PASSWORD} \"CREATE TABLE ${TEST_TABLE_NAME} (name text);\"\necho 'Table Creation SUCCESS!'\n\necho 'Testing DML...'\npsql_cmd ${TEST_DATABASE_NAME} ${TEST_DATABASE_USER} ${TEST_DATABASE_PASSWORD} \"INSERT INTO ${TEST_TABLE_NAME} (name) VALUES ('test.');\"\npsql_cmd ${TEST_DATABASE_NAME} ${TEST_DATABASE_USER} ${TEST_DATABASE_PASSWORD} \"SELECT * FROM ${TEST_TABLE_NAME};\"\npsql_cmd ${TEST_DATABASE_NAME} ${TEST_DATABASE_USER} ${TEST_DATABASE_PASSWORD} \"DELETE FROM ${TEST_TABLE_NAME};\"\necho 'DML Test SUCCESS!'\n\nexit 0\n"
  },
  {
    "path": "postgresql/templates/bin/_postgresql_archive_cleanup.sh.tpl",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset +ex\n\n# ARCHIVE_LIMIT env variable is Threshold of archiving supposed to be kept in percentage\nclean_up () {\n  echo \"Cleanup required as Utilization is above threshold\"\n  # Get file count and delete half of the archive while maintaining the order of the files\n  FILE_COUNT=$(ls -1 ${ARCHIVE_PATH} | sort | wc -l)\n  COUNT=0\n  echo $((FILE_COUNT/2))\n  for file in $(ls -1 ${ARCHIVE_PATH} | sort); do\n    if [[ $COUNT -lt $((FILE_COUNT/2)) ]]; then\n      echo \"removing following file $file\"\n      rm -rf ${ARCHIVE_PATH}/$file\n    else\n      break\n    fi\n    COUNT=$((COUNT+1))\n  done\n}\n#infinite loop to check the utilization of archive\nwhile true\ndo\n  # checking the utilization of archive directory\n  UTILIZATION=$(df -h ${ARCHIVE_PATH} | awk ' NR==2 {print $5} ' | awk '{ print substr( $0, 1, length($0)-1 ) }')\n  if [[ $UTILIZATION -gt ${ARCHIVE_LIMIT} ]];\n  then\n    clean_up\n  fi\n  sleep 3600\ndone\n\n\n"
  },
  {
    "path": "postgresql/templates/bin/_readiness.sh.tpl",
    "content": "#!/usr/bin/env bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\npg_isready -U ${POSTGRES_USER}\n"
  },
  {
    "path": "postgresql/templates/bin/_remote_retrieve_postgresql.sh.tpl",
    "content": "#!/bin/bash\n\n# Copyright 2018 The Openstack-Helm Authors.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -x\n\nRESTORE_DIR=${POSTGRESQL_BACKUP_BASE_DIR}/db/${POSTGRESQL_POD_NAMESPACE}/postgres/restore\nARCHIVE_DIR=${POSTGRESQL_BACKUP_BASE_DIR}/db/${POSTGRESQL_POD_NAMESPACE}/postgres/archive\n\nsource /tmp/common_backup_restore.sh\n\n# Keep processing requests for the life of the pod.\nwhile true\ndo\n  # Wait until a restore request file is present on the disk\n  echo \"Waiting for a restore request...\"\n  NO_TIMEOUT=true\n  wait_for_file $RESTORE_DIR/*_request $NO_TIMEOUT\n\n  echo \"Done waiting. Request received\"\n\n  CONTAINER_NAME={{ .Values.conf.backup.remote_backup.container_name }}\n\n  if [[ -e $RESTORE_DIR/archive_listing_request ]]\n  then\n    # We've finished consuming the request, so delete the request file.\n    rm -rf $RESTORE_DIR/*_request\n\n    openstack container show $CONTAINER_NAME\n    if [[ $? -eq 0 ]]\n    then\n      # Get the list, ensureing that we only pick up postgres backups from the\n      # requested namespace\n      openstack object list $CONTAINER_NAME | grep postgres | grep $POSTGRESQL_POD_NAMESPACE | awk '{print $2}' > $RESTORE_DIR/archive_list_response\n      if [[ $? 
!= 0 ]]\n      then\n        echo \"Container object listing could not be obtained.\" >> $RESTORE_DIR/archive_list_error\n      else\n        echo \"Archive listing successfully retrieved.\"\n      fi\n    else\n      echo \"Container $CONTAINER_NAME does not exist.\" >> $RESTORE_DIR/archive_list_error\n    fi\n  elif [[ -e $RESTORE_DIR/get_archive_request ]]\n  then\n    ARCHIVE=`cat $RESTORE_DIR/get_archive_request`\n\n    echo \"Request for archive $ARCHIVE received.\"\n\n    # We've finished consuming the request, so delete the request file.\n    rm -rf $RESTORE_DIR/*_request\n\n    openstack object save --file $RESTORE_DIR/$ARCHIVE $CONTAINER_NAME $ARCHIVE\n    if [[ $? != 0 ]]\n    then\n      echo \"Archive $ARCHIVE could not be retrieved.\" >> $RESTORE_DIR/archive_error\n    else\n      echo \"Archive $ARCHIVE successfully retrieved.\"\n    fi\n\n    # Signal to the other container that the archive is available.\n    touch $RESTORE_DIR/archive_response\n  else\n    rm -rf $RESTORE_DIR/*_request\n    echo \"Invalid request received.\"\n  fi\n\n  sleep 5\ndone\n"
  },
  {
    "path": "postgresql/templates/bin/_remote_store_postgresql.sh.tpl",
    "content": "#!/bin/bash\n\n# Copyright 2018 The Openstack-Helm Authors.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n# Note: not using set -e because more elaborate error handling is required.\nset -x\n\nBACKUPS_DIR=${POSTGRESQL_BACKUP_BASE_DIR}/db/${POSTGRESQL_POD_NAMESPACE}/postgres/current\n\n# Create the working backups directory if the other container didn't already,\n# and if this container creates it first, ensure that permissions are writable\n# for the other container (running as \"postgres\" user) in the same \"postgres\"\n# group.\nmkdir -p $BACKUPS_DIR || log_backup_error_exit \"Cannot create directory ${BACKUPS_DIR}!\" 1\nchmod 775 $BACKUPS_DIR\n\nsource /tmp/common_backup_restore.sh\n\n#Send backup file to storage\nsend_to_storage() {\n  FILEPATH=$1\n  FILE=$2\n\n  CONTAINER_NAME={{ .Values.conf.backup.remote_backup.container_name }}\n\n  # Grab the list of containers on the remote site\n  RESULT=$(openstack container list 2>&1)\n\n  if [[ $? == 0 ]]\n  then\n    echo $RESULT | grep $CONTAINER_NAME\n    if [[ $? != 0 ]]\n    then\n      # Create the container\n      openstack container create $CONTAINER_NAME || log ERROR postgresql_backup \"Cannot create container ${CONTAINER_NAME}!\"\n      openstack container show $CONTAINER_NAME\n      if [[ $? 
!= 0 ]]\n      then\n        log ERROR postgresql_backup \"Error retrieving container $CONTAINER_NAME after creation.\"\n        return 1\n      fi\n    fi\n  else\n    echo $RESULT | grep \"HTTP 401\"\n    if [[ $? == 0 ]]\n    then\n      log ERROR postgresql_backup \"Could not access keystone: HTTP 401\"\n      return 1\n    else\n      echo $RESULT | grep \"ConnectionError\"\n      if [[ $? == 0 ]]\n      then\n        log ERROR postgresql_backup \"Could not access keystone: ConnectionError\"\n        # In this case, keystone or the site/node may be temporarily down.\n        # Return slightly different error code so the calling code can retry\n        return 2\n      else\n        log ERROR postgresql_backup \"Could not get container list: ${RESULT}\"\n        return 1\n      fi\n    fi\n  fi\n\n  # Create an object to store the file\n  openstack object create --name $FILE $CONTAINER_NAME $FILEPATH/$FILE || log ERROR postgresql_backup \"Cannot create container object ${FILE}!\"\n  openstack object show $CONTAINER_NAME $FILE\n  if [[ $? != 0 ]]\n  then\n    log ERROR postgresql_backup \"Error retrieving container object $FILE after creation.\"\n    return 1\n  fi\n\n  log INFO postgresql_backup \"Created file $FILE in container $CONTAINER_NAME successfully.\"\n  return 0\n}\n\nif {{ .Values.conf.backup.remote_backup.enabled }}\nthen\n  WAIT_FOR_BACKUP_TIMEOUT=1800\n  WAIT_FOR_RGW_AVAIL_TIMEOUT=1800\n\n  # Wait until a backup file is ready to ship to RGW, or until we time out.\n  DONE=false\n  TIMEOUT_EXP=$(( $(date +%s) + $WAIT_FOR_BACKUP_TIMEOUT ))\n  while [[ $DONE == \"false\" ]]\n  do\n    log INFO postgresql_backup \"Waiting for a backup file to be written to the disk.\"\n    sleep 5\n    DELTA=$(( TIMEOUT_EXP - $(date +%s) ))\n    ls -l ${BACKUPS_DIR}/backup_completed\n    if [[ $? 
-eq 0 ]]\n    then\n      DONE=true\n    elif [[ $DELTA -lt 0 ]]\n    then\n      DONE=true\n    fi\n  done\n\n  log INFO postgresql_backup \"Done waiting.\"\n  FILE_TO_SEND=$(ls $BACKUPS_DIR/*.tar.gz)\n\n  ERROR_SEEN=false\n\n  if [[ $FILE_TO_SEND != \"\" ]]\n  then\n    if [[ $(echo $FILE_TO_SEND | wc -w) -gt 1 ]]\n    then\n      # There should only be one backup file to send - this is an error\n      log_backup_error_exit \"More than one backup file found (${FILE_TO_SEND}) - can only handle 1!\" 1\n    fi\n\n    # Get just the filename from the file (strip the path)\n    FILE=$(basename $FILE_TO_SEND)\n\n    log INFO postgresql_backup \"Backup file ${BACKUPS_DIR}/${FILE} found.\"\n\n    DONE=false\n    TIMEOUT_EXP=$(( $(date +%s) + $WAIT_FOR_RGW_AVAIL_TIMEOUT ))\n    while [[ $DONE == \"false\" ]]\n    do\n      # Store the new archive to the remote backup storage facility.\n      send_to_storage $BACKUPS_DIR $FILE\n\n      # Check if successful\n      if [[ $? -eq 0 ]]\n      then\n        log INFO postgresql_backup \"Backup file ${BACKUPS_DIR}/${FILE} successfully sent to RGW. Deleting from current backup directory.\"\n        DONE=true\n      elif [[ $? -eq 2 ]]\n      then\n        # Temporary failure occurred. 
We need to retry if we haven't timed out\n        log WARN postgresql_backup \"Backup file ${BACKUPS_DIR}/${FILE} could not be sent to RGW due to connection issue.\"\n        DELTA=$(( TIMEOUT_EXP - $(date +%s) ))\n        if [[ $DELTA -lt 0 ]]\n        then\n          DONE=true\n          log ERROR postgresql_backup \"Timed out waiting for RGW to become available.\"\n          ERROR_SEEN=true\n        else\n          log INFO postgresql_backup \"Sleeping 30 seconds waiting for RGW to become available...\"\n          sleep 30\n          log INFO postgresql_backup \"Retrying...\"\n        fi\n      else\n        log ERROR postgresql_backup \"Backup file ${BACKUPS_DIR}/${FILE} could not be sent to the RGW.\"\n        ERROR_SEEN=true\n        DONE=true\n      fi\n    done\n  else\n    log ERROR postgresql_backup \"No backup file found in $BACKUPS_DIR.\"\n    ERROR_SEEN=true\n  fi\n\n  if [[ $ERROR_SEEN == \"true\" ]]\n  then\n    log ERROR postgresql_backup \"Errors encountered. Exiting.\"\n    exit 1\n  fi\n\n  # At this point, we should remove the files in current dir.\n  # If an error occurred, then we need the file to remain there for future\n  # container restarts, and maybe it will eventually succeed.\n  rm -rf $BACKUPS_DIR/*\n\n  #Only delete an old archive after a successful archive\n  if [ \"${POSTGRESQL_BACKUP_DAYS_TO_KEEP}\" -gt 0 ]\n  then\n    log INFO postgresql_backup \"Deleting backups older than ${POSTGRESQL_BACKUP_DAYS_TO_KEEP} days\"\n    BACKUP_FILES=/tmp/backup_files\n    PG_BACKUP_FILES=/tmp/pg_backup_files\n\n    openstack object list $CONTAINER_NAME > $BACKUP_FILES\n    if [[ $? 
!= 0 ]]\n    then\n      log_backup_error_exit \"Could not obtain a list of current backup files in the RGW\" 1\n    fi\n\n    # Filter out other types of files like mariadb, etcd backups etc..\n    cat $BACKUP_FILES | grep postgres | grep $POSTGRESQL_POD_NAMESPACE | awk '{print $2}' > $PG_BACKUP_FILES\n\n    for ARCHIVE_FILE in $(cat $PG_BACKUP_FILES)\n    do\n      ARCHIVE_DATE=$( echo $ARCHIVE_FILE | awk -F/ '{print $NF}' | cut -d'.' -f 4)\n      if [ \"$(seconds_difference ${ARCHIVE_DATE})\" -gt \"$((${POSTGRESQL_BACKUP_DAYS_TO_KEEP}*86400))\" ]\n      then\n        log INFO postgresql_backup \"Deleting file ${ARCHIVE_FILE} from the RGW\"\n        openstack object delete $CONTAINER_NAME $ARCHIVE_FILE || log_backup_error_exit \"Cannot delete container object ${ARCHIVE_FILE}!\" 1\n      fi\n    done\n  fi\nelse\n  log INFO postgresql_backup \"Remote backup is not enabled\"\n  exit 0\nfi\n"
  },
  {
    "path": "postgresql/templates/bin/_restore_postgresql.sh.tpl",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n# Capture the user's command line arguments\nARGS=(\"$@\")\n\n# This is needed to get the postgresql admin password\n# Note: xtracing should be off so it doesn't print the pw\nexport PGPASSWORD=$(cat /etc/postgresql/admin_user.conf \\\n                    | grep postgres | awk -F: '{print $5}')\n\nsource /tmp/restore_main.sh\n\n# Export the variables needed by the framework\nexport DB_NAME=\"postgres\"\nexport DB_NAMESPACE=${POSTGRESQL_POD_NAMESPACE}\nexport ARCHIVE_DIR=${POSTGRESQL_BACKUP_BASE_DIR}/db/${DB_NAMESPACE}/${DB_NAME}/archive\n\n# Define variables needed in this file\nPOSTGRESQL_HOST=$(cat /etc/postgresql/admin_user.conf | cut -d: -f 1)\nexport PSQL=\"psql -U $POSTGRESQL_ADMIN_USER -h $POSTGRESQL_HOST\"\nexport LOG_FILE=/tmp/dbrestore.log\n\n# Extract all databases from an archive and put them in the requested\n# file.\nget_databases() {\n  TMP_DIR=$1\n  DB_FILE=$2\n\n  SQL_FILE=$TMP_DIR/postgres.$POSTGRESQL_POD_NAMESPACE.*.sql\n  if [ -f $SQL_FILE ]; then\n    grep 'CREATE DATABASE' $SQL_FILE | awk '{ print $3 }' > $DB_FILE\n  else\n    # Error, cannot report the databases\n    echo \"No SQL file found - cannot extract the databases\"\n    return 1\n  fi\n}\n\n# Extract all tables of a database from an archive and put them in the requested\n# file.\nget_tables() {\n  DATABASE=$1\n  TMP_DIR=$2\n  TABLE_FILE=$3\n\n  
SQL_FILE=$TMP_DIR/postgres.$POSTGRESQL_POD_NAMESPACE.*.sql\n  if [ -f $SQL_FILE ]; then\n    cat $SQL_FILE | sed -n /'\\\\connect '$DATABASE/,/'\\\\connect'/p | grep \"CREATE TABLE\" | awk -F'[. ]' '{print $4}' > $TABLE_FILE\n  else\n    # Error, cannot report the tables\n    echo \"No SQL file found - cannot extract the tables\"\n    return 1\n  fi\n}\n\n# Extract all rows in the given table of a database from an archive and put them in the requested\n# file.\nget_rows() {\n  DATABASE=$1\n  TABLE=$2\n  TMP_DIR=$3\n  ROW_FILE=$4\n\n  SQL_FILE=$TMP_DIR/postgres.$POSTGRESQL_POD_NAMESPACE.*.sql\n  if [ -f $SQL_FILE ]; then\n    cat $SQL_FILE | sed -n /'\\\\connect '${DATABASE}/,/'\\\\connect'/p > /tmp/db.sql\n    cat /tmp/db.sql | grep \"INSERT INTO public.${TABLE} VALUES\" > $ROW_FILE\n    rm /tmp/db.sql\n  else\n    # Error, cannot report the rows\n    echo \"No SQL file found - cannot extract the rows\"\n    return 1\n  fi\n}\n\n# Extract the schema for the given table in the given database belonging to the archive file\n# found in the TMP_DIR.\nget_schema() {\n  DATABASE=$1\n  TABLE=$2\n  TMP_DIR=$3\n  SCHEMA_FILE=$4\n\n  SQL_FILE=$TMP_DIR/postgres.$POSTGRESQL_POD_NAMESPACE.*.sql\n  if [ -f $SQL_FILE ]; then\n    DB_FILE=$(mktemp -p /tmp)\n    cat $SQL_FILE | sed -n /'\\\\connect '${DATABASE}/,/'\\\\connect'/p > ${DB_FILE}\n    cat ${DB_FILE} | sed -n /'CREATE TABLE public.'${TABLE}' ('/,/'--'/p > ${SCHEMA_FILE}\n    cat ${DB_FILE} | sed -n /'CREATE SEQUENCE public.'${TABLE}/,/'--'/p >> ${SCHEMA_FILE}\n    cat ${DB_FILE} | sed -n /'ALTER TABLE public.'${TABLE}/,/'--'/p >> ${SCHEMA_FILE}\n    cat ${DB_FILE} | sed -n /'ALTER TABLE ONLY public.'${TABLE}/,/'--'/p >> ${SCHEMA_FILE}\n    cat ${DB_FILE} | sed -n /'ALTER SEQUENCE public.'${TABLE}/,/'--'/p >> ${SCHEMA_FILE}\n    cat ${DB_FILE} | sed -n /'SELECT pg_catalog.*public.'${TABLE}/,/'--'/p >> ${SCHEMA_FILE}\n    cat ${DB_FILE} | sed -n /'CREATE INDEX.*public.'${TABLE}' USING'/,/'--'/p >> ${SCHEMA_FILE}\n    cat 
${DB_FILE} | sed -n /'GRANT.*public.'${TABLE}' TO'/,/'--'/p >> ${SCHEMA_FILE}\n    rm -f ${DB_FILE}\n  else\n    # Error, cannot report the rows\n    echo \"No SQL file found - cannot extract the schema\"\n    return 1\n  fi\n}\n\n# Extract Single Database SQL Dump from pg_dumpall dump file\nextract_single_db_dump() {\n  ARCHIVE=$1\n  DATABASE=$2\n  DIR=$3\n  sed -n '/\\\\connect'\" ${DATABASE}/,/PostgreSQL database dump complete/p\" ${ARCHIVE} > ${DIR}/${DATABASE}.sql\n}\n\n# Re-enable connections to a database\nreenable_connections() {\n  SINGLE_DB_NAME=$1\n\n  # First make sure this is not the main postgres database or either of the\n  # two template databases that should not be touched.\n  if [[ ${SINGLE_DB_NAME} == \"postgres\" ||\n        ${SINGLE_DB_NAME} == \"template0\" ||\n        ${SINGLE_DB_NAME} == \"template1\" ]]; then\n    echo \"Cannot re-enable connections on an postgres internal db ${SINGLE_DB_NAME}\"\n    return 1\n  fi\n\n  # Re-enable connections to the DB\n  $PSQL -tc \"UPDATE pg_database SET datallowconn = 'true' WHERE datname = '${SINGLE_DB_NAME}';\" > /dev/null 2>&1\n  if [[ \"$?\" -ne 0 ]]; then\n    echo \"Could not re-enable connections for database ${SINGLE_DB_NAME}\"\n    return 1\n  fi\n  return 0\n}\n\n# Drop connections from a database\ndrop_connections() {\n  SINGLE_DB_NAME=$1\n\n  # First make sure this is not the main postgres database or either of the\n  # two template databases that should not be touched.\n  if [[ ${SINGLE_DB_NAME} == \"postgres\" ||\n        ${SINGLE_DB_NAME} == \"template0\" ||\n        ${SINGLE_DB_NAME} == \"template1\" ]]; then\n    echo \"Cannot drop connections on an postgres internal db ${SINGLE_DB_NAME}\"\n    return 1\n  fi\n\n  # First, prevent any new connections from happening on this database.\n  $PSQL -tc \"UPDATE pg_database SET datallowconn = 'false' WHERE datname = '${SINGLE_DB_NAME}';\" > /dev/null 2>&1\n  if [[ \"$?\" -ne 0 ]]; then\n    echo \"Could not prevent new connections before 
restoring database ${SINGLE_DB_NAME}.\"\n    return 1\n  fi\n\n  # Next, force disconnection of all clients currently connected to this database.\n  $PSQL -tc \"SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '${SINGLE_DB_NAME}';\" > /dev/null 2>&1\n  if [[ \"$?\" -ne 0 ]]; then\n    echo \"Could not drop existing connections before restoring database ${SINGLE_DB_NAME}.\"\n    reenable_connections ${SINGLE_DB_NAME}\n    return 1\n  fi\n  return 0\n}\n\n# Re-enable connections for all of the databases within Postgresql\nreenable_connections_on_all_dbs() {\n  # Get a list of the databases\n  DB_LIST=$($PSQL -tc \"\\l\" | grep \"| postgres |\" | awk '{print $1}')\n\n  RET=0\n\n  # Re-enable the connections for each of the databases.\n  for DB in $DB_LIST; do\n    if [[ ${DB} != \"postgres\" && ${DB} != \"template0\" && ${DB} != \"template1\" ]]; then\n      reenable_connections $DB\n      if [[ \"$?\" -ne 0 ]]; then\n        RET=1\n      fi\n    fi\n  done\n\n  return $RET\n}\n\n# Drop connections in all of the databases within Postgresql\ndrop_connections_on_all_dbs() {\n  # Get a list of the databases\n  DB_LIST=$($PSQL -tc \"\\l\" | grep \"| postgres |\" | awk '{print $1}')\n\n  RET=0\n\n  # Drop the connections for each of the databases.\n  for DB in $DB_LIST; do\n    # Make sure this is not the main postgres database or either of the\n    # two template databases that should not be touched.\n    if [[ ${DB} != \"postgres\" && ${DB} != \"template0\" && ${DB} != \"template1\" ]]; then\n      drop_connections $DB\n      if [[ \"$?\" -ne 0 ]]; then\n        RET=1\n      fi\n    fi\n  done\n\n  # If there was a failure to drop any connections, go ahead and re-enable\n  # them all to prevent a lock-out condition\n  if [[ $RET -ne 0 ]]; then\n    reenable_connections_on_all_dbs\n  fi\n\n  return $RET\n}\n\n# Restore a single database dump from pg_dumpall sql dumpfile.\nrestore_single_db() {\n  SINGLE_DB_NAME=$1\n  TMP_DIR=$2\n\n  # Reset the 
logfile incase there was some older log there\n  rm -rf ${LOG_FILE}\n  touch ${LOG_FILE}\n\n  # First make sure this is not the main postgres database or either of the\n  # two template databases that should not be touched.\n  if [[ ${SINGLE_DB_NAME} == \"postgres\" ||\n        ${SINGLE_DB_NAME} == \"template0\" ||\n        ${SINGLE_DB_NAME} == \"template1\" ]]; then\n    echo \"Cannot restore an postgres internal db ${SINGLE_DB_NAME}\"\n    return 1\n  fi\n\n  SQL_FILE=$TMP_DIR/postgres.$POSTGRESQL_POD_NAMESPACE.*.sql\n  if [ -f $SQL_FILE ]; then\n    extract_single_db_dump $SQL_FILE $SINGLE_DB_NAME $TMP_DIR\n    if [[ -f $TMP_DIR/$SINGLE_DB_NAME.sql && -s $TMP_DIR/$SINGLE_DB_NAME.sql ]]; then\n      # Drop connections first\n      drop_connections ${SINGLE_DB_NAME}\n      if [[ \"$?\" -ne 0 ]]; then\n        return 1\n      fi\n\n      # Next, drop the database\n      $PSQL -tc \"DROP DATABASE $SINGLE_DB_NAME;\"\n      if [[ \"$?\" -ne 0 ]]; then\n        echo \"Could not drop the old ${SINGLE_DB_NAME} database before restoring it.\"\n        reenable_connections ${SINGLE_DB_NAME}\n        return 1\n      fi\n\n      # Postgresql does not have the concept of creating database if condition.\n      # This next command creates the database in case it does not exist.\n      $PSQL -tc \"SELECT 1 FROM pg_database WHERE datname = '$SINGLE_DB_NAME'\" | grep -q 1 || \\\n            $PSQL -c \"CREATE DATABASE $SINGLE_DB_NAME\"\n      if [[ \"$?\" -ne 0 ]]; then\n        echo \"Could not create the single database being restored: ${SINGLE_DB_NAME}.\"\n        reenable_connections ${SINGLE_DB_NAME}\n        return 1\n      fi\n      $PSQL -d $SINGLE_DB_NAME -f ${TMP_DIR}/${SINGLE_DB_NAME}.sql 2>>$LOG_FILE >> $LOG_FILE\n      if [[ \"$?\" -eq 0 ]]; then\n        if grep \"ERROR:\" ${LOG_FILE} > /dev/null 2>&1; then\n          cat $LOG_FILE\n          echo \"Errors occurred during the restore of database ${SINGLE_DB_NAME}\"\n          reenable_connections ${SINGLE_DB_NAME}\n   
       return 1\n        else\n          echo \"Database restore Successful.\"\n        fi\n      else\n        # Dump out the log file for debugging\n        cat $LOG_FILE\n        echo -e \"\\nDatabase restore Failed.\"\n        reenable_connections ${SINGLE_DB_NAME}\n        return 1\n      fi\n\n      # Re-enable connections to the DB\n      reenable_connections ${SINGLE_DB_NAME}\n      if [[ \"$?\" -ne 0 ]]; then\n        return 1\n      fi\n    else\n      echo \"Database dump For $SINGLE_DB_NAME is empty or not available.\"\n      return 1\n    fi\n  else\n    echo \"No database file available to restore from.\"\n    return 1\n  fi\n  return 0\n}\n\n# Restore all the databases from the pg_dumpall sql file.\nrestore_all_dbs() {\n  TMP_DIR=$1\n\n  # Reset the logfile incase there was some older log there\n  rm -rf ${LOG_FILE}\n  touch ${LOG_FILE}\n\n  SQL_FILE=$TMP_DIR/postgres.$POSTGRESQL_POD_NAMESPACE.*.sql\n  if [ -f $SQL_FILE ]; then\n\n    # Check the scope of the archive.\n    SCOPE=$(echo ${SQL_FILE} | awk -F'.' '{print $(NF-1)}')\n    if [[ \"${SCOPE}\" != \"all\" ]]; then\n      # This is just a single database backup. 
The user should\n      # instead use the single database restore option.\n      echo \"Cannot use the restore all option for an archive containing only a single database.\"\n      echo \"Please use the single database restore option.\"\n      return 1\n    fi\n\n    # First drop all connections on all databases\n    drop_connections_on_all_dbs\n    if [[ \"$?\" -ne 0 ]]; then\n      return 1\n    fi\n\n    $PSQL postgres -f $SQL_FILE 2>>$LOG_FILE >> $LOG_FILE\n    if [[ \"$?\" -eq 0 ]]; then\n      if grep \"ERROR:\" ${LOG_FILE} > /dev/null 2>&1; then\n        cat ${LOG_FILE}\n        echo \"Errors occurred during the restore of the databases.\"\n        reenable_connections_on_all_dbs\n        return 1\n      else\n        echo \"Database Restore Successful.\"\n      fi\n    else\n      # Dump out the log file for debugging\n      cat ${LOG_FILE}\n      echo -e \"\\nDatabase Restore failed.\"\n      reenable_connections_on_all_dbs\n      return 1\n    fi\n\n    # Re-enable connections on all databases\n    reenable_connections_on_all_dbs\n    if [[ \"$?\" -ne 0 ]]; then\n      return 1\n    fi\n  else\n    echo \"There is no database file available to restore from.\"\n    return 1\n  fi\n  return 0\n}\n\n# Call the CLI interpreter, providing the archive directory path and the\n# user arguments passed in\ncli_main ${ARGS[@]}\n"
  },
  {
    "path": "postgresql/templates/bin/_start.sh.tpl",
    "content": "#!/usr/bin/env bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# Disable echo mode while setting the password\n# unless we are in debug mode\n{{- if .Values.conf.debug }}\nset -x\n{{- end }}\nset -e\n\nPOSTGRES_DB=${POSTGRES_DB:-\"postgres\"}\n\n# Check if the Postgres data directory exists before attempting to\n# set the password\nif [[ -d \"$PGDATA\" && -s \"$PGDATA/PG_VERSION\" ]]\nthen\n  postgres --single -D \"$PGDATA\" \"$POSTGRES_DB\" <<EOF\nALTER ROLE $POSTGRES_USER WITH PASSWORD '$POSTGRES_PASSWORD'\nEOF\n\nfi\n\nset -x\n\nbash /tmp/archive_cleanup.sh &\n\nexec /usr/local/bin/docker-entrypoint.sh postgres -c config_file=/tmp/postgresql.conf\n"
  },
  {
    "path": "postgresql/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n  http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.certificates -}}\n{{ dict \"envAll\" . \"service\" \"postgresql\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "postgresql/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $configMapBinName := printf \"%s-%s\" $envAll.Release.Name \"etcd-bin\"  }}\n---\napiVersion: v1\n{{/* Note: this is a secret because credentials must be rendered into the password script. */}}\nkind: Secret\nmetadata:\n  name: postgresql-bin\ntype: Opaque\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: {{- include \"helm-toolkit.scripts.image_repo_sync\" . | b64enc }}\n{{- end }}\n  start.sh: {{ tuple \"bin/_start.sh.tpl\" . | include \"helm-toolkit.utils.template\" | b64enc }}\n  readiness.sh: {{ tuple \"bin/_readiness.sh.tpl\" . | include \"helm-toolkit.utils.template\" | b64enc }}\n  archive_cleanup.sh: {{ tuple \"bin/_postgresql_archive_cleanup.sh.tpl\" . | include \"helm-toolkit.utils.template\" | b64enc }}\n  db_test.sh: {{ tuple \"bin/_db_test.sh.tpl\" . | include \"helm-toolkit.utils.template\" | b64enc }}\n{{- if .Values.conf.backup.enabled }}\n  backup_postgresql.sh: {{ tuple \"bin/_backup_postgresql.sh.tpl\" . | include \"helm-toolkit.utils.template\" | b64enc }}\n  restore_postgresql.sh: {{ tuple \"bin/_restore_postgresql.sh.tpl\" . | include \"helm-toolkit.utils.template\" | b64enc }}\n  backup_main.sh: {{ include \"helm-toolkit.scripts.db-backup-restore.backup_main\" . | b64enc }}\n  restore_main.sh: {{ include \"helm-toolkit.scripts.db-backup-restore.restore_main\" . 
| b64enc }}\n{{- end }}\n{{- if .Values.manifests.job_ks_user }}\n  ks-user.sh: {{ include \"helm-toolkit.scripts.keystone_user\" . | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: postgresql-etc\ndata:\n  postgresql.conf: |\n{{- range $key, $value := default dict .Values.conf.postgresql }}\n    {{ $key | snakecase }} = '{{ $value }}'\n{{- end }}\n  pg_hba.conf: |\n{{ .Values.conf.pg_hba | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/cron-job-backup-postgres.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_job_postgresql_backup }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"postgresql-backup\" }}\n{{- $failoverUserClass := .Values.conf.backup.remote_backup.failover_user_class }}\n{{ tuple $envAll \"postgresql_backup\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: postgresql-backup\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"postgresql-backup\" \"backup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  schedule: {{ .Values.jobs.postgresql_backup.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.postgresql_backup.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.postgresql_backup.history.failed }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"postgresql-backup\" \"backup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"postgresql-backup\" \"containerNames\" (list \"init\" \"backup-perms\" \"postgresql-backup\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{- if .Values.jobs.postgresql_backup.backoffLimit }}\n      
backoffLimit: {{ .Values.jobs.postgresql_backup.backoffLimit }}\n{{- end }}\n{{- if .Values.jobs.postgresql_backup.activeDeadlineSeconds }}\n      activeDeadlineSeconds: {{ .Values.jobs.postgresql_backup.activeDeadlineSeconds }}\n{{- end }}\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"postgresql-backup\" \"backup\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n{{ dict \"envAll\" $envAll \"application\" \"postgresql_backup\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n          serviceAccountName: {{ $serviceAccountName }}\n          restartPolicy: OnFailure\n{{- if $envAll.Values.pod.affinity }}\n{{- if $envAll.Values.pod.affinity.postgresql_backup }}\n          affinity:\n{{  index $envAll.Values.pod.affinity \"postgresql_backup\"  | toYaml | indent 12}}\n{{- end }}\n{{- end }}\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n          initContainers:\n{{ tuple $envAll \"postgresql_backup\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n            - name: backup-perms\n{{ tuple $envAll \"postgresql_backup\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.postgresql_backup | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"postgresql_backup\" \"container\" \"backup_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              command:\n                - chown\n                - -R\n                - \"65534:65534\"\n                - $(POSTGRESQL_BACKUP_BASE_DIR)\n              env:\n                - name: POSTGRESQL_BACKUP_BASE_DIR\n                  value: {{ .Values.conf.backup.base_path }}\n              volumeMounts:\n                - 
mountPath: /tmp\n                  name: pod-tmp\n                - mountPath: {{ .Values.conf.backup.base_path }}\n                  name: postgresql-backup-dir\n          containers:\n            - name: postgresql-backup\n{{ tuple $envAll \"postgresql_backup\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.postgresql_backup | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"postgresql_backup\" \"container\" \"postgresql_backup\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              command:\n                - /tmp/backup_postgresql.sh\n              env:\n                - name: POSTGRESQL_ADMIN_PASSWORD\n                  valueFrom:\n                    secretKeyRef:\n                      key: POSTGRES_PASSWORD\n                      name: postgresql-admin\n                - name: POSTGRESQL_ADMIN_USER\n                  valueFrom:\n                    secretKeyRef:\n                      key: POSTGRES_USER\n                      name: postgresql-admin\n                - name: POSTGRESQL_BACKUP_BASE_DIR\n                  value: {{ .Values.conf.backup.base_path }}\n                - name: POSTGRESQL_BACKUP_PG_DUMPALL_OPTIONS\n                  value: {{ .Values.conf.backup.pg_dumpall_options }}\n                - name: POSTGRESQL_LOCAL_BACKUP_DAYS_TO_KEEP\n                  value: \"{{ .Values.conf.backup.days_to_keep }}\"\n                - name: POSTGRESQL_POD_NAMESPACE\n                  valueFrom:\n                    fieldRef:\n                      fieldPath: metadata.namespace\n                - name: REMOTE_BACKUP_ENABLED\n                  value: \"{{ .Values.conf.backup.remote_backup.enabled }}\"\n{{- if .Values.conf.backup.remote_backup.enabled }}\n                - name: POSTGRESQL_REMOTE_BACKUP_DAYS_TO_KEEP\n                  value: \"{{ 
.Values.conf.backup.remote_backup.days_to_keep }}\"\n                - name: CONTAINER_NAME\n                  value: \"{{ .Values.conf.backup.remote_backup.container_name }}\"\n                - name: STORAGE_POLICY\n                  value: \"{{ .Values.conf.backup.remote_backup.storage_policy }}\"\n                - name: NUMBER_OF_RETRIES_SEND_BACKUP_TO_REMOTE\n                  value: {{ .Values.conf.backup.remote_backup.number_of_retries | quote }}\n                - name: MIN_DELAY_SEND_BACKUP_TO_REMOTE\n                  value: {{ .Values.conf.backup.remote_backup.delay_range.min | quote }}\n                - name: MAX_DELAY_SEND_BACKUP_TO_REMOTE\n                  value: {{ .Values.conf.backup.remote_backup.delay_range.max | quote }}\n                - name: THROTTLE_BACKUPS_ENABLED\n                  value: \"{{ .Values.conf.backup.remote_backup.throttle_backups.enabled }}\"\n                - name: THROTTLE_LIMIT\n                  value: {{ .Values.conf.backup.remote_backup.throttle_backups.sessions_limit | quote }}\n                - name: THROTTLE_LOCK_EXPIRE_AFTER\n                  value: {{ .Values.conf.backup.remote_backup.throttle_backups.lock_expire_after | quote }}\n                - name: THROTTLE_RETRY_AFTER\n                  value: {{ .Values.conf.backup.remote_backup.throttle_backups.retry_after | quote }}\n                - name: THROTTLE_CONTAINER_NAME\n                  value: {{ .Values.conf.backup.remote_backup.throttle_backups.container_name | quote }}\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.postgresql }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 16 }}\n{{- $failoverIdentityClass := index $envAll.Values.endpoints.identity.auth $failoverUserClass }}\n{{- if $failoverIdentityClass }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_failover_env_vars\" $env | indent 16 }}\n{{- end }}\n{{- end }}\n{{- end }}\n              volumeMounts:\n                - name: 
pod-tmp\n                  mountPath: /tmp\n                - mountPath: /tmp/backup_postgresql.sh\n                  name: postgresql-bin\n                  readOnly: true\n                  subPath: backup_postgresql.sh\n                - mountPath: /tmp/backup_main.sh\n                  name: postgresql-bin\n                  readOnly: true\n                  subPath: backup_main.sh\n                - mountPath: {{ .Values.conf.backup.base_path }}\n                  name: postgresql-backup-dir\n                - name: postgresql-secrets\n                  mountPath: /etc/postgresql/admin_user.conf\n                  subPath: admin_user.conf\n                  readOnly: true\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: postgresql-secrets\n              secret:\n                secretName: postgresql-secrets\n                defaultMode: 292\n            - name: postgresql-bin\n              secret:\n                secretName: postgresql-bin\n                defaultMode: 365\n            {{- if and .Values.volume.backup.enabled  .Values.manifests.pvc_backup }}\n            - name: postgresql-backup-dir\n              persistentVolumeClaim:\n                claimName: postgresql-backup-data\n            {{- else }}\n            - hostPath:\n                path: {{ .Values.conf.backup.base_path }}\n                type: DirectoryOrCreate\n              name: postgresql-backup-dir\n            {{- end }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "postgresql/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"postgresql\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/job-ks-user.yaml",
    "content": "{{/*\nCopyright 2019 The Openstack-Helm Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $backoffLimit := .Values.jobs.ks_user.backoffLimit }}\n{{- $activeDeadlineSeconds := .Values.jobs.ks_user.activeDeadlineSeconds }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"postgresql\" \"secretBin\" \"postgresql-bin\" \"backoffLimit\" $backoffLimit \"activeDeadlineSeconds\" $activeDeadlineSeconds -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/monitoring/prometheus/bin/_create-postgresql-exporter-user.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -e\n\npsql \"postgresql://${ADMIN_USER}:${ADMIN_PASSWORD}@${POSTGRESQL_HOST_PORT}?sslmode=disable\" << EOF\nCREATE USER ${EXPORTER_USER} WITH PASSWORD '${EXPORTER_PASSWORD}';\nALTER USER ${EXPORTER_USER} SET SEARCH_PATH TO postgres_exporter,pg_catalog;\nGRANT SELECT ON pg_stat_database TO ${EXPORTER_USER};\nEOF\n"
  },
  {
    "path": "postgresql/templates/monitoring/prometheus/exporter-configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.configmap_bin .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: postgresql-exporter-bin\ndata:\n  create-postgresql-exporter-user.sh: |\n{{ tuple \"bin/_create-postgresql-exporter-user.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/monitoring/prometheus/exporter-configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.configmap_etc .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: postgresql-exporter-etc\ntype: Opaque\ndata:\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.exporter.queries \"key\" \"queries.yaml\" \"format\" \"Secret\") | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/monitoring/prometheus/exporter-deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.deployment_exporter .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n{{- $serviceAccountName := \"prometheus-postgresql-exporter\" }}\n{{ tuple $envAll \"prometheus_postgresql_exporter\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: prometheus-postgresql-exporter\nspec:\n  replicas: {{ .Values.pod.replicas.prometheus_postgresql_exporter }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"prometheus_postgresql_exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"prometheus_postgresql_exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      namespace: {{ .Values.endpoints.prometheus_postgresql_exporter.namespace }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"prometheus-postgresql-exporter\" \"containerNames\" (list \"postgresql-exporter\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll 
\"application\" \"prometheus_postgresql_exporter\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.prometheus_postgresql_exporter.node_selector_key }}: {{ .Values.labels.prometheus_postgresql_exporter.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.prometheus_postgresql_exporter.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"prometheus_postgresql_exporter\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: postgresql-exporter\n{{ tuple $envAll \"prometheus_postgresql_exporter\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.prometheus_postgresql_exporter | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"prometheus_postgresql_exporter\" \"container\" \"postgresql_exporter\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - \"/postgres_exporter\"\n            - \"--extend.query-path=/queries.yaml\"\n          ports:\n            - name: metrics\n              containerPort: {{ tuple \"prometheus_postgresql_exporter\" \"internal\" \"metrics\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          env:\n            - name: DATA_SOURCE_NAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.postgresql.exporter }}\n                  key: DATA_SOURCE_NAME\n          volumeMounts:\n            - name: postgresql-exporter-etc\n              mountPath: /queries.yaml\n              subPath: queries.yaml\n      volumes:\n      - name: postgresql-exporter-etc\n        secret:\n          secretName: postgresql-exporter-etc\n          defaultMode: 0444\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/monitoring/prometheus/exporter-job-create-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.job_user_create .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"prometheus-postgresql-exporter-create-user\" }}\n{{ tuple $envAll \"prometheus_postgresql_exporter_create_user\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: prometheus-postgresql-exporter-create-user\n  labels:\n{{ tuple $envAll \"prometheus_postgresql_exporter\" \"create_user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"prometheus_postgresql_exporter\" \"create_user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"prometheus-postgresql-exporter-create-user\" \"containerNames\" (list \"prometheus-postgresql-exporter-create-user\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"create_user\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n    
  nodeSelector:\n        {{ .Values.labels.prometheus_postgresql_exporter.node_selector_key }}: {{ .Values.labels.prometheus_postgresql_exporter.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"prometheus_postgresql_exporter_create_user\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: prometheus-postgresql-exporter-create-user\n{{ tuple $envAll \"prometheus_postgresql_exporter_create_user\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.prometheus_postgresql_exporter_create_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"create_user\" \"container\" \"prometheus_postgresql_exporter_create_user\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/create-postgresql-exporter-user.sh\n          env:\n            - name: EXPORTER_USER\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.postgresql.exporter }}\n                  key: EXPORTER_USER\n            - name: EXPORTER_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.postgresql.exporter }}\n                  key: EXPORTER_PASSWORD\n            - name: ADMIN_USER\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.postgresql.admin }}\n                  key: POSTGRES_USER\n            - name: ADMIN_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.postgresql.admin }}\n                  key: POSTGRES_PASSWORD\n            - name: POSTGRESQL_HOST_PORT\n              value: {{ tuple \"postgresql\" \"internal\" \"postgresql\" $envAll | include 
\"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: postgresql-exporter-bin\n              mountPath: /tmp/create-postgresql-exporter-user.sh\n              subPath: create-postgresql-exporter-user.sh\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: postgresql-exporter-bin\n          configMap:\n            name: postgresql-exporter-bin\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/monitoring/prometheus/exporter-secrets-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.secret_etc .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n{{- $exporter_user := .Values.endpoints.postgresql.auth.exporter.username }}\n{{- $exporter_password := .Values.endpoints.postgresql.auth.exporter.password }}\n{{- $db_host := tuple \"postgresql\" \"internal\" \"postgresql\" $envAll | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n{{- $data_source_name := printf \"postgresql://%s:%s@%s/postgres?sslmode=disable\" $exporter_user $exporter_password $db_host }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ .Values.secrets.postgresql.exporter }}\ntype: Opaque\ndata:\n  DATA_SOURCE_NAME: {{ $data_source_name | b64enc }}\n  EXPORTER_USER: {{ $exporter_user | b64enc }}\n  EXPORTER_PASSWORD: {{ $exporter_password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/monitoring/prometheus/exporter-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.service_exporter .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.postgresql_exporter }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"prometheus_postgresql_exporter\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  labels:\n{{ tuple $envAll \"prometheus_postgresql_exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{- if .Values.monitoring.prometheus.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n{{- end }}\nspec:\n  ports:\n  - name: metrics\n    port: {{ tuple \"prometheus_postgresql_exporter\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"prometheus_postgresql_exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"postgresql\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "postgresql/templates/pod-test.yaml",
    "content": "{{/*\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n*/}}\n\n{{- if .Values.manifests.test_basic }}\n{{- $dependencies := .Values.dependencies.static.tests }}\n{{- $serviceAccountName := print .Release.Name \"-test\" }}\n{{ tuple . $dependencies $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{ .Release.Name }}-db-test\"\n  annotations:\n    \"helm.sh/hook\": \"test-success\"\nspec:\n  restartPolicy: Never\n  serviceAccountName: {{ $serviceAccountName }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  initContainers:\n{{ tuple . 
$dependencies list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: \"{{ .Release.Name }}-db-test\"\n      env:\n        - name: DB_FQDN\n          valueFrom:\n            secretKeyRef:\n              name: {{ .Values.secrets.postgresql.admin }}\n              key: DATABASE_HOST\n        - name: DB_PORT\n          valueFrom:\n            secretKeyRef:\n              name: {{ .Values.secrets.postgresql.admin }}\n              key: DATABASE_PORT\n        - name: DB_ADMIN_USER\n          valueFrom:\n            secretKeyRef:\n              name: {{ .Values.secrets.postgresql.admin }}\n              key: POSTGRES_USER\n        - name: ADMIN_PASSWORD\n          valueFrom:\n            secretKeyRef:\n              name: {{ .Values.secrets.postgresql.admin }}\n              key: POSTGRES_PASSWORD\n      image: {{ .Values.images.tags.postgresql }}\n      imagePullPolicy: {{ .Values.images.pull_policy }}\n{{ tuple . .Values.pod.resources.test | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      command:\n        - /tmp/db_test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: postgresql-bin\n          mountPath: /tmp/db_test.sh\n          subPath: db_test.sh\n          readOnly: true\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: postgresql-bin\n      secret:\n        secretName: postgresql-bin\n        defaultMode: 0555\n...\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/postgresql-backup-pvc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.volume.backup.enabled .Values.manifests.pvc_backup }}\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: postgresql-backup-data\nspec:\n  accessModes: [ \"ReadWriteOnce\" ]\n  resources:\n    requests:\n      storage: {{ .Values.volume.backup.size }}\n  storageClassName: {{ .Values.volume.backup.class_name }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/secret-admin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_admin }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ .Values.secrets.postgresql.admin }}\ntype: Opaque\ndata:\n  POSTGRES_USER: {{ .Values.endpoints.postgresql.auth.admin.username | b64enc }}\n  POSTGRES_PASSWORD: {{ .Values.endpoints.postgresql.auth.admin.password | b64enc }}\n  DATABASE_PORT: {{ tuple \"postgresql\" \"internal\" \"postgresql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | b64enc }}\n  DATABASE_HOST: |-\n{{ tuple \"postgresql\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" | b64enc | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/secret-audit.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.secret_audit }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ .Values.secrets.postgresql.audit }}\ntype: Opaque\ndata:\n  AUDIT_PASSWORD: {{ .Values.endpoints.postgresql.auth.audit.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/secret-backup-restore.yaml",
    "content": "{{/*\nThis manifest results a secret being created which has the key information\nneeded for backing up and restoring the Postgresql databases.\n*/}}\n\n{{- if and .Values.conf.backup.enabled .Values.manifests.secret_backup_restore }}\n\n{{- $envAll := . }}\n{{- $userClass := \"backup_restore\" }}\n{{- $secretName := index $envAll.Values.secrets.postgresql $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  BACKUP_ENABLED: {{ $envAll.Values.conf.backup.enabled | quote | b64enc }}\n  BACKUP_BASE_PATH: {{ $envAll.Values.conf.backup.base_path | b64enc }}\n  LOCAL_DAYS_TO_KEEP: {{ $envAll.Values.conf.backup.days_to_keep | quote | b64enc }}\n  PG_DUMPALL_OPTIONS: {{ $envAll.Values.conf.backup.pg_dumpall_options | quote | b64enc }}\n  REMOTE_BACKUP_ENABLED: {{ $envAll.Values.conf.backup.remote_backup.enabled | quote | b64enc }}\n  REMOTE_BACKUP_CONTAINER: {{ $envAll.Values.conf.backup.remote_backup.container_name | b64enc }}\n  REMOTE_BACKUP_DAYS_TO_KEEP: {{ $envAll.Values.conf.backup.remote_backup.days_to_keep | quote | b64enc }}\n  REMOTE_BACKUP_STORAGE_POLICY: {{ $envAll.Values.conf.backup.remote_backup.storage_policy | b64enc }}\n  REMOTE_BACKUP_RETRIES: {{ $envAll.Values.conf.backup.remote_backup.number_of_retries | quote | b64enc }}\n  REMOTE_BACKUP_SEND_DELAY_MIN: {{ $envAll.Values.conf.backup.remote_backup.delay_range.min | quote | b64enc }}\n  REMOTE_BACKUP_SEND_DELAY_MAX: {{ $envAll.Values.conf.backup.remote_backup.delay_range.max | quote | b64enc }}\n  THROTTLE_BACKUPS_ENABLED: {{ $envAll.Values.conf.backup.remote_backup.throttle_backups.enabled | quote | b64enc }}\n  THROTTLE_LIMIT: {{ $envAll.Values.conf.backup.remote_backup.throttle_backups.sessions_limit | quote | b64enc }}\n  THROTTLE_LOCK_EXPIRE_AFTER: {{ $envAll.Values.conf.backup.remote_backup.throttle_backups.lock_expire_after | quote | b64enc }}\n  THROTTLE_RETRY_AFTER: {{ 
$envAll.Values.conf.backup.remote_backup.throttle_backups.retry_after | quote | b64enc }}\n  THROTTLE_CONTAINER_NAME: {{ $envAll.Values.conf.backup.remote_backup.throttle_backups.container_name | quote | b64enc }}\n...\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/secret-rgw.yaml",
    "content": "{{/*\nThis manifest results in two secrets being created:\n  1) Keystone \"postgresql\" secret, which is needed to access the cluster\n     (remote or same cluster) for storing postgresql backups. If the\n     cluster is remote, the auth_url would be non-null.\n  2) Keystone \"admin\" secret, which is needed to create the \"postgresql\"\n     keystone account mentioned above. This may not be needed if the\n     account is in a remote cluster (auth_url is non-null in that case).\n*/}}\n\n{{- if .Values.conf.backup.remote_backup.enabled }}\n\n{{- $envAll := . }}\n{{- $userClass := .Values.conf.backup.remote_backup.primary_user_class }}\n{{- $failoverUserClass := .Values.conf.backup.remote_backup.failover_user_class }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- $identityClass := index .Values.endpoints.identity.auth $userClass }}\n{{- if $identityClass.auth_url }}\n  OS_AUTH_URL: {{ $identityClass.auth_url | b64enc }}\n{{- else }}\n  OS_AUTH_URL: {{ tuple \"identity\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n  OS_REGION_NAME: {{ $identityClass.region_name | b64enc }}\n  OS_INTERFACE: {{ $identityClass.interface | default \"internal\" | b64enc }}\n  OS_PROJECT_DOMAIN_NAME: {{ $identityClass.project_domain_name | b64enc }}\n  OS_PROJECT_NAME: {{ $identityClass.project_name | b64enc }}\n  OS_USER_DOMAIN_NAME: {{ $identityClass.user_domain_name | b64enc }}\n  OS_USERNAME: {{ $identityClass.username | b64enc }}\n  OS_PASSWORD: {{ $identityClass.password | b64enc }}\n  OS_DEFAULT_DOMAIN: {{ $identityClass.default_domain_id | default \"default\" | b64enc }}\n\n{{- $failoverIdentityClass := index .Values.endpoints.identity.auth $failoverUserClass }}\n{{- if $failoverIdentityClass }}\n{{- if $failoverIdentityClass.auth_url }}\n  OS_AUTH_URL_FAILOVER: {{ 
$failoverIdentityClass.auth_url | b64enc }}\n{{- else }}\n  OS_AUTH_URL_FAILOVER: {{ tuple \"identity\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n  OS_REGION_NAME_FAILOVER: {{ $failoverIdentityClass.region_name | b64enc }}\n  OS_INTERFACE_FAILOVER: {{ $failoverIdentityClass.interface | default \"internal\" | b64enc }}\n  OS_PROJECT_DOMAIN_NAME_FAILOVER: {{ $failoverIdentityClass.project_domain_name | b64enc }}\n  OS_PROJECT_NAME_FAILOVER: {{ $failoverIdentityClass.project_name | b64enc }}\n  OS_USER_DOMAIN_NAME_FAILOVER: {{ $failoverIdentityClass.user_domain_name | b64enc }}\n  OS_USERNAME_FAILOVER: {{ $failoverIdentityClass.username | b64enc }}\n  OS_PASSWORD_FAILOVER: {{ $failoverIdentityClass.password | b64enc }}\n  OS_DEFAULT_DOMAIN_FAILOVER: {{ $failoverIdentityClass.default_domain_id | default \"default\" | b64enc }}\n{{- end }}\n...\n{{- if .Values.manifests.job_ks_user }}\n{{- $userClass := \"admin\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- $identityClass := index .Values.endpoints.identity.auth $userClass }}\n{{- if $identityClass.auth_url }}\n  OS_AUTH_URL: {{ $identityClass.auth_url | b64enc }}\n{{- else }}\n  OS_AUTH_URL: {{ tuple \"identity\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n  OS_REGION_NAME: {{ $identityClass.region_name | b64enc }}\n  OS_INTERFACE: {{ $identityClass.interface | default \"internal\" | b64enc }}\n  OS_PROJECT_DOMAIN_NAME: {{ $identityClass.project_domain_name | b64enc }}\n  OS_PROJECT_NAME: {{ $identityClass.project_name | b64enc }}\n  OS_USER_DOMAIN_NAME: {{ $identityClass.user_domain_name | b64enc }}\n  OS_USERNAME: {{ $identityClass.username | b64enc }}\n  OS_PASSWORD: {{ $identityClass.password | b64enc }}\n  OS_DEFAULT_DOMAIN: {{ 
$identityClass.default_domain_id | default \"default\" | b64enc }}\n...\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/secrets/_admin_user.conf.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{ tuple \"postgresql\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}:*:*:{{ .Values.endpoints.postgresql.auth.admin.username }}:{{ .Values.endpoints.postgresql.auth.admin.password }}\n"
  },
  {
    "path": "postgresql/templates/secrets-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: postgresql-secrets\ntype: Opaque\ndata:\n  admin_user.conf: {{ tuple \"secrets/_admin_user.conf.tpl\" . | include \"helm-toolkit.utils.template\"  | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/service-postgres.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"postgresql\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: db\n      port: {{ tuple \"postgresql\" \"internal\" \"postgresql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"postgresql\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/service-restapi.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"postgresql-restapi\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: restapi\n      port: {{ tuple \"postgresql-restapi\" \"internal\" \"restapi\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"postgresql\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/templates/statefulset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"livenessProbeTemplate\" -}}\nexec:\n  command:\n    - /tmp/readiness.sh\n{{- end -}}\n\n{{- define \"readinessProbeTemplate\" -}}\nexec:\n  command:\n    - /tmp/readiness.sh\n{{- end -}}\n\n{{- if .Values.manifests.statefulset }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"postgresql\" }}\n{{ tuple $envAll \"postgresql\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $serviceAccountName }}\n  namespace: {{ $envAll.Release.Namespace }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n    verbs:\n      - create\n      - get\n      - list\n      - patch\n      - update\n      - watch\n  - apiGroups:\n      - \"\"\n    resources:\n      - endpoints\n    verbs:\n      - get\n      - patch\n      - update\n      # the following three privileges are necessary only when using endpoints\n      - create\n      - list\n      - watch\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods\n    verbs:\n      - get\n      - list\n      - patch\n      - update\n      - watch\n  # The following privilege is only necessary for creation of headless service\n  # for postgresql-config endpoint, in order to prevent cleaning it up by the\n  # k8s master.\n  - apiGroups:\n      - \"\"\n    resources:\n      - services\n    verbs:\n      - create\n      
- get\n      - list\n      - patch\n      - update\n      - watch\n      - delete\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\n  namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: postgresql\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"postgresql\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n    cluster-name: {{ tuple \"postgresql\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  serviceName: {{ tuple \"postgresql\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  podManagementPolicy: \"Parallel\"\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_statefulset\" | indent 2 }}\n  replicas: {{ .Values.pod.replicas.server }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"postgresql\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n      cluster-name: {{ tuple \"postgresql\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"postgresql\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n        cluster-name: {{ tuple \"postgresql\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"postgresql\" \"containerNames\" (list \"postgresql\" \"set-volume-perms\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n        configmap-admin-hash: {{ tuple \"secret-admin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-secrets-etc-hash: {{ tuple \"secrets-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"postgresql\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.server.node_selector_key }}: {{ .Values.labels.server.node_selector_value }}\n\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.server.timeout | default \"180\" }}\n      initContainers:\n{{ tuple $envAll \"postgresql\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: set-volume-perms\n{{ tuple $envAll \"postgresql\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command: [\"/bin/sh\", \"-c\"]\n          args:\n            - set -xe;\n              /bin/chown {{ .Values.pod.security_context.server.pod.runAsUser }} {{ .Values.storage.mount.path }};\n              /bin/chmod 700 {{ .Values.storage.mount.path }};\n              /bin/chmod 700 {{ .Values.storage.mount.path }}/*;\n{{- if .Values.manifests.certificates }}\n              /bin/cp /server_certs_temp/* 
/server_certs/.;\n              /bin/chown {{ .Values.pod.security_context.server.pod.runAsUser }} /server_certs;\n              /bin/chown {{ .Values.pod.security_context.server.pod.runAsUser }} /server_certs/*;\n              /bin/chmod 700 /server_certs;\n              /bin/chmod 600 /server_certs/*;\n{{- end }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"set_volume_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: postgresql-data\n              mountPath: {{ .Values.storage.mount.path }}\n              subPath: {{ .Values.storage.mount.subpath }}\n{{- if .Values.manifests.certificates }}\n            - name: server-certs\n              mountPath: /server_certs\n              # server-cert-temp mountpoint is temp storage for secrets. We copy the\n              # secrets to server-certs folder and set owner and permissions.\n              # This is needed because the secrets are always created readonly.\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.postgresql.tls.server.internal \"path\" \"/server_certs_temp\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- end }}\n      containers:\n        - name: postgresql\n{{ tuple $envAll \"postgresql\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"postgresql\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          ports:\n            - containerPort: {{ tuple \"postgresql-restapi\" \"internal\" \"restapi\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              protocol: TCP\n            - containerPort: {{ tuple \"postgresql\" \"internal\" \"postgresql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              protocol: TCP\n          env:\n            - name: PGDATA\n              value: \"{{ .Values.storage.mount.path }}/pgdata\"\n            - name: ARCHIVE_LIMIT\n              value: \"{{ .Values.storage.archive.archive_limit }}\"\n            - name: ARCHIVE_PATH\n              value: \"{{ .Values.storage.archive.mount_path }}\"\n            - name: KUBERNETES_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: 'POSTGRES_PASSWORD'\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.postgresql.admin }}\n                  key: 'POSTGRES_PASSWORD'\n            - name: 'POSTGRES_USER'\n              valueFrom:\n                secretKeyRef:\n                  name: {{ .Values.secrets.postgresql.admin }}\n                  key: 'POSTGRES_USER'\n          command:\n            - /tmp/start.sh\n{{ dict \"envAll\" . \"component\" \"server\" \"container\" \"postgresql\" \"type\" \"liveness\" \"probeTemplate\" (include \"livenessProbeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"server\" \"container\" \"postgresql\" \"type\" \"readiness\" \"probeTemplate\" (include \"readinessProbeTemplate\" . 
| fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - bash\n                  - -c\n                  - kill -INT 1\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: pg-run\n              mountPath: /var/run/postgresql\n            - name: postgresql-bin\n              mountPath: /tmp/start.sh\n              subPath: start.sh\n              readOnly: true\n            - name: postgresql-bin\n              mountPath: /tmp/readiness.sh\n              subPath: readiness.sh\n              readOnly: true\n            - name: postgresql-etc\n              mountPath: /tmp/pg_hba.conf\n              subPath: pg_hba.conf\n              readOnly: true\n            - name: postgresql-etc\n              mountPath: /tmp/postgresql.conf\n              subPath: postgresql.conf\n              readOnly: true\n            - name: postgresql-data\n              mountPath: {{ .Values.storage.mount.path }}\n              subPath: {{ .Values.storage.mount.subpath }}\n{{- if  eq .Values.conf.postgresql.archive_mode \"on\" }}\n            - name: postgresql-archive\n              mountPath: {{ .Values.storage.archive.mount_path }}\n              subPath: {{ .Values.storage.mount.subpath }}\n            - name: postgresql-bin\n              mountPath: /tmp/archive_cleanup.sh\n              subPath: archive_cleanup.sh\n              readOnly: true\n{{- end }}\n{{- if .Values.manifests.certificates }}\n            - name: server-certs\n              mountPath: /server_certs\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: postgres-home-config\n          emptyDir: {}\n        - name: pg-run\n          emptyDir:\n            medium: \"Memory\"\n        - name: postgresql-bin\n          secret:\n            secretName: postgresql-bin\n            
defaultMode: 0555\n{{- if .Values.manifests.certificates }}\n        - name: server-certs\n          emptyDir: {}\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.postgresql.tls.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n        - name: postgresql-etc\n          configMap:\n            name: postgresql-etc\n            defaultMode: 0444\n{{- if not .Values.storage.pvc.enabled }}\n        - name: postgresql-data\n          hostPath:\n            path: {{ .Values.storage.host.host_path }}\n            type: DirectoryOrCreate\n{{- if  eq .Values.conf.postgresql.archive_mode \"on\" }}\n        - name: postgresql-archive\n          hostPath:\n            path: {{ .Values.storage.host.archive_host_path }}\n            type: DirectoryOrCreate\n{{- end }}\n{{- else }}\n  volumeClaimTemplates:\n    - metadata:\n        name: postgresql-data\n        annotations:\n          {{ .Values.storage.pvc.class_path }}: {{ .Values.storage.pvc.class_name }}\n      spec:\n        accessModes: [\"ReadWriteOnce\"]\n        resources:\n          requests:\n            storage: {{ .Values.storage.pvc.size }}\n{{- if  eq .Values.conf.postgresql.archive_mode \"on\" }}\n    - metadata:\n        name: postgresql-archive\n        annotations:\n          {{ .Values.storage.archive_pvc.class_path }}: {{ .Values.storage.archive_pvc.class_name }}\n      spec:\n        accessModes: [\"ReadWriteOnce\"]\n        resources:\n          requests:\n            storage: {{ .Values.storage.archive_pvc.size }}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "postgresql/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for postgresql.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nrelease_group: null\n\npod:\n  security_context:\n    prometheus_postgresql_exporter:\n      pod:\n        runAsUser: 65534\n      container:\n        postgresql_exporter:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    server:\n      pod:\n        runAsUser: 999\n        # fsGroup used to allows cert file be witten to file.\n        fsGroup: 999\n      container:\n        set_volume_perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        postgresql:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    postgresql_backup:\n      pod:\n        runAsUser: 65534\n      container:\n        backup_perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        postgresql_backup:\n          runAsUser: 65534\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    create_user:\n      pod:\n        runAsUser: 65534\n      container:\n        prometheus_postgresql_exporter_create_user:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      
weight:\n        default: 10\n  replicas:\n    # only 1 replica currently supported\n    server: 1\n    prometheus_postgresql_exporter: 1\n  lifecycle:\n    upgrades:\n      statefulsets:\n        pod_replacement_strategy: OnDelete\n        partition: 0\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    termination_grace_period:\n      prometheus_postgresql_exporter:\n        timeout: 30\n      server:\n        timeout: 180\n  probes:\n    server:\n      postgresql:\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            timeoutSeconds: 5\n            failureThreshold: 10\n        readiness:\n          enabled: false\n          params:\n            initialDelaySeconds: 30\n            timeoutSeconds: 5\n            failureThreshold: 10\n  resources:\n    enabled: false\n    server:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    test:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    prometheus_postgresql_exporter:\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n      requests:\n        memory: \"128Mi\"\n        cpu: \"500m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      prometheus_postgresql_exporter_create_user:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n      postgresql_backup:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n  
      requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\n# using dockerhub postgresql: https://hub.docker.com/r/library/postgres/tags/\nimages:\n  tags:\n    postgresql: \"docker.io/library/postgres:14.5\"\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    prometheus_postgresql_exporter: docker.io/wrouesnel/postgres_exporter:v0.4.6\n    prometheus_postgresql_exporter_create_user: \"docker.io/library/postgres:14.5\"\n    postgresql_backup: \"quay.io/airshipit/porthole-postgresql-utility:latest-ubuntu_jammy\"\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nstorage:\n  pvc:\n    enabled: true\n    size: 5Gi\n    class_name: general\n    class_path: volume.beta.kubernetes.io/storage-class\n  archive_pvc:\n    size: 5Gi\n    class_name: general\n    class_path: volume.beta.kubernetes.io/storage-class\n  host:\n    host_path: /data/openstack-helm/postgresql\n    archive_host_path: /data/openstack-helm/postgresql-archive\n  mount:\n    path: /var/lib/postgresql\n    subpath: .\n  archive:\n    mount_path: /var/lib/archive\n    archive_limit: 60\n\nlabels:\n  server:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selectory_key: openstack-control-plane\n    node_selector_value: enabled\n  prometheus_postgresql_exporter:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - postgresql-image-repo-sync\n        services:\n          - endpoint: node\n            service: 
local_image_registry\n  static:\n    postgresql_backup:\n      jobs:\n        - postgresql-ks-user\n      services:\n        - endpoint: internal\n          service: postgresql\n    tests:\n      services:\n        - endpoint: internal\n          service: postgresql\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    prometheus_postgresql_exporter_create_user:\n      services:\n        - endpoint: internal\n          service: postgresql\n    prometheus_postgresql_exporter:\n      services:\n        - endpoint: internal\n          service: postgresql\n      jobs:\n        - prometheus-postgresql-exporter-create-user\n\nmonitoring:\n  prometheus:\n    enabled: false\n    postgresql_exporter:\n      scrape: true\n\nvolume:\n  backup:\n    enabled: true\n    class_name: general\n    size: 5Gi\n\njobs:\n  postgresql_backup:\n    # activeDeadlineSeconds == 0 means no deadline\n    activeDeadlineSeconds: 0\n    backoffLimit: 6\n    cron: \"0 0 * * *\"\n    history:\n      success: 3\n      failed: 1\n  ks_user:\n    # activeDeadlineSeconds == 0 means no deadline\n    activeDeadlineSeconds: 0\n    backoffLimit: 6\n\nnetwork_policy:\n  postgresql:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nconf:\n  debug: false\n  pg_hba: |\n    host all all 127.0.0.1/32 trust\n    host all all 0.0.0.0/0 md5\n    local all all trust\n\n  postgresql:\n    archive_mode: 'on'\n    archive_command: 'test ! 
-f /var/lib/archive/%f && gzip < %p > /var/lib/archive/%f'\n    cluster_name: 'postgresql'\n    datestyle: 'iso, mdy'\n    external_pid_file: '/tmp/postgres.pid'\n    fsync: 'on'\n    listen_addresses: '0.0.0.0'\n    log_checkpoints: 'on'\n    log_connections: 'on'\n    log_disconnections: 'on'\n    log_line_prefix: 'postgresql: %t [%p]: [%l-1] %c %x %d %u %a %h %m '\n    log_lock_waits: 'on'\n    log_temp_files: '0'\n    log_timezone: 'UTC'\n    max_connections: '1000'\n    max_locks_per_transaction: '64'\n    max_prepared_transactions: '0'\n    max_wal_senders: '16'\n    max_worker_processes: '10'\n    port: '5432'\n    shared_buffers: '2GB'\n    ssl: 'off'\n    ssl_cert_file: '/server_certs/tls.crt'\n    ssl_ca_file: '/server_certs/ca.crt'\n    ssl_key_file: '/server_certs/tls.key'\n    ssl_ciphers: 'TLSv1.2:!aNULL'\n    tcp_keepalives_idle: '900'\n    tcp_keepalives_interval: '100'\n    timezone: 'UTC'\n    track_commit_timestamp: 'on'\n    track_functions: 'all'\n    wal_keep_size: '256'\n    wal_level: 'hot_standby'\n    wal_log_hints: 'on'\n    hba_file: '/tmp/pg_hba.conf'\n    ident_file: '/tmp/pg_ident.conf'\n  backup:\n    enabled: false\n    base_path: /var/backup\n    days_to_keep: 3\n    pg_dumpall_options: '--inserts --clean'\n    remote_backup:\n      enabled: false\n      container_name: postgresql\n      days_to_keep: 14\n      storage_policy: default-placement\n      number_of_retries: 5\n      delay_range:\n        min: 30\n        max: 60\n      throttle_backups:\n        enabled: false\n        sessions_limit: 480\n        lock_expire_after: 7200\n        retry_after: 3600\n        container_name: throttle-backups-manager\n      primary_user_class: postgresql\n      failover_user_class: postgresql_failover\n\n  exporter:\n    queries:\n      pg_postmaster:\n        query: \"SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()\"\n        master: true\n        metrics:\n          - start_time_seconds:\n            
  usage: \"GAUGE\"\n              description: \"Time at which postmaster started\"\n\nsecrets:\n  oci_image_registry:\n    postgresql: postgresql-oci-image-registry-key\n  postgresql:\n    admin: postgresql-admin\n    exporter: postgresql-exporter\n    audit: postgresql-audit\n    backup_restore: postgresql-backup-restore\n    tls:\n      server:\n        internal: postgresql-tls-direct\n  identity:\n    admin: keystone-admin-user\n    postgresql: postgresql-backup-user\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      postresql:\n        username: postresql\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  postgresql:\n    auth:\n      admin:\n        username: postgres\n        password: password\n      exporter:\n        username: psql_exporter\n        password: psql_exp_pass\n      audit:\n        username: audit\n        password: password\n    hosts:\n      default: postgresql\n    host_fqdn_override:\n      default: null\n    path: null\n    scheme: postgresql\n    port:\n      postgresql:\n        default: 5432\n  postgresql_restapi:\n    hosts:\n      default: postgresql-restapi\n    host_fqdn_override:\n      default: null\n    path: null\n    scheme: postgresql\n    port:\n      restapi:\n        default: 8008\n  prometheus_postgresql_exporter:\n    namespace: null\n    hosts:\n      default: postgresql-exporter\n    host_fqdn_override:\n      default: null\n    path:\n      default: /metrics\n    scheme:\n      default: 'http'\n    port:\n   
   metrics:\n        default: 9187\n  identity:\n    name: backup-storage-auth\n    namespace: openstack\n    auth:\n      admin:\n        # Auth URL of null indicates local authentication\n        # HTK will form the URL unless specified here\n        auth_url: null\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      postgresql:\n        # Auth URL of null indicates local authentication\n        # HTK will form the URL unless specified here\n        auth_url: null\n        role: admin\n        region_name: RegionOne\n        username: postgresql-backup-user\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 80\n        internal: 5000\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  job_image_repo_sync: true\n  network_policy: false\n  job_ks_user: false\n  secret_admin: true\n  secret_etc: true\n  secret_audit: true\n  secret_backup_restore: false\n  secret_registry: true\n  service: true\n  statefulset: true\n  cron_job_postgresql_backup: false\n  pvc_backup: false\n  monitoring:\n    prometheus:\n      configmap_bin: true\n      configmap_etc: true\n      deployment_exporter: true\n      job_user_create: true\n      secret_etc: true\n      service_exporter: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #       
  - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "powerdns/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v4.1.10\ndescription: OpenStack-Helm PowerDNS\nname: powerdns\nversion: 2025.2.0\nhome: https://www.powerdns.com/\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "powerdns/templates/bin/_powerdns-mysql-sync.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nMYSQLCMD='mysql -r -N'\nif [ $(echo 'show tables' | $MYSQLCMD | wc -c) -eq 0 ]; then\n  $MYSQLCMD < /etc/pdns/schema.sql\nfi\n"
  },
  {
    "path": "powerdns/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: powerdns-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  powerdns-mysql-sync.sh: |\n{{ tuple \"bin/_powerdns-mysql-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "powerdns/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"powerdns.configmap.etc\" -}}\n{{- range $key, $value :=  . }}\n{{ $key | replace \"_\" \"-\" }} = {{ $value }}\n{{- end }}\n{{- end -}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $mysql := .Values.conf.mysql.client }}\n\n{{- if empty $mysql.host -}}\n{{- $_ :=  tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.endpoint_host_lookup\" | set $mysql \"host\" -}}\n{{- $_ :=  $mysql.host | set .Values.conf.powerdns \"gmysql_host\" -}}\n{{- end -}}\n\n{{- if empty $mysql.port -}}\n{{- $_ :=  tuple \"oslo_db\" \"internal\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set $mysql \"port\" -}}\n{{- $_ :=  $mysql.port | set .Values.conf.powerdns \"gmysql_port\" -}}\n{{- end -}}\n\n{{- if empty $mysql.user -}}\n{{- $_ :=  .Values.endpoints.oslo_db.auth.powerdns.username | set $mysql \"user\" -}}\n{{- $_ :=  $mysql.user | set .Values.conf.powerdns \"gmysql_user\" -}}\n{{- end -}}\n\n{{- if empty $mysql.password -}}\n{{- $_ :=  .Values.endpoints.oslo_db.auth.powerdns.password | set $mysql \"password\" -}}\n{{- $_ :=  $mysql.password | set .Values.conf.powerdns \"gmysql_password\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.powerdns.api_key -}}\n{{- $_ :=  tuple \"powerdns\" \"service\" . 
| include \"helm-toolkit.endpoints.endpoint_token_lookup\" | set .Values.conf.powerdns \"api_key\" -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: powerdns-etc\ntype: Opaque\ndata:\n  pdns.conf: {{ include \"powerdns.configmap.etc\" .Values.conf.powerdns | b64enc }}\n  my.cnf: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.mysql | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "powerdns/templates/deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"powerdns\" }}\n{{ tuple $envAll \"powerdns\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: powerdns\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"powerdns\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.server }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"powerdns\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"powerdns\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"powerdns\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.powerdns.node_selector_key }}: {{ .Values.labels.powerdns.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"powerdns\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: powerdns\n{{ tuple $envAll \"powerdns\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - pdns_server\n          ports:\n            - containerPort: {{ tuple \"powerdns\" \"internal\" \"powerdns\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              name: pdns-udp\n              protocol: UDP\n            - containerPort: {{ tuple \"powerdns\" \"internal\" \"powerdns_tcp\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              name: pdns-tcp\n            - containerPort: {{ tuple \"powerdns\" \"internal\" \"powerdns_api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              name: pdns-api\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"powerdns\" \"internal\" \"powerdns_tcp\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: powerdns-etc\n              mountPath: /etc/pdns/conf.d/pdns.conf\n              subPath: pdns.conf\n              readOnly: true\n      volumes:\n        - name: powerdns-etc\n          secret:\n            secretName: powerdns-etc\n            defaultMode: 0444\n{{- end }}\n"
  },
  {
    "path": "powerdns/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "powerdns/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_init }}\n\n{{- $dbToInit := dict \"inputType\" \"secret\" \"adminSecret\" .Values.secrets.oslo_db.admin \"userSecret\" .Values.secrets.oslo_db.powerdns -}}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"powerdns\" \"dbToInit\" $dbToInit -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n\n{{- end }}\n"
  },
  {
    "path": "powerdns/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $envAll := . }}\n\n\n{{- $serviceAccountName := \"powerdns-db-sync\" }}\n{{ tuple $envAll \"db_sync\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ $serviceAccountName }}\n  labels:\n{{ tuple $envAll \"powerdns\" \"db-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"powerdns\" \"db-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"db_sync\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: powerdns-db-sync\n{{ tuple $envAll \"db_sync\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_sync | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/powerdns-mysql-sync.sh\n          volumeMounts:\n            - name: powerdns-bin\n              mountPath: 
/tmp/powerdns-mysql-sync.sh\n              subPath: powerdns-mysql-sync.sh\n              readOnly: true\n            - name: powerdns-etc\n              mountPath: /etc/mysql/my.cnf\n              subPath: my.cnf\n              readOnly: true\n      volumes:\n        - name: powerdns-bin\n          configMap:\n            name: powerdns-bin\n            defaultMode: 0555\n        - name: powerdns-etc\n          secret:\n            secretName: powerdns-etc\n            defaultMode: 0444\n{{- end }}\n"
  },
  {
    "path": "powerdns/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"powerdns\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "powerdns/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"powerdns\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  DB_CONNECTION: {{ tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "powerdns/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "powerdns/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_dns }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"powerdns\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - port: {{ tuple \"powerdns\" \"internal\" \"powerdns\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      name: pdns-udp\n      protocol: UDP\n    - port: {{ tuple \"powerdns\" \"internal\" \"powerdns_tcp\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      name: pdns-tcp\n    {{- if .Values.manifests.service_api }}\n    - port: {{ tuple \"powerdns\" \"internal\" \"powerdns_api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      name: pdns-api\n    {{- end }}\n  selector:\n{{ tuple $envAll \"powerdns\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{- if .Values.network.node_port_enabled }}\n{{/*\nSet Type=NodePort to get output packets from cluster internal IP\nof the POD instead of container one.\n*/}}\n  type: NodePort\n  {{- if .Values.network.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{- end }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "powerdns/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for powerdns.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nimages:\n  tags:\n    powerdns: docker.io/psitrax/powerdns:4.1.10\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_sync: docker.io/psitrax/powerdns:4.1.10\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\npod:\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  replicas:\n    server: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n  resources:\n    enabled: false\n    server:\n      limits:\n        memory: \"128Mi\"\n        cpu: \"500m\"\n      requests:\n        memory: \"128Mi\"\n        cpu: \"500m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        
requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nlabels:\n  powerdns:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - powerdns-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    powerdns:\n      jobs:\n        - powerdns-db-init\n        - powerdns-db-sync\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - powerdns-db-init\n      services:\n        - service: oslo_db\n          endpoint: internal\n\nnetwork:\n  node_port_enabled: true\n  external_policy_local: true\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      powerdns:\n        username: powerdns\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  powerdns:\n    auth:\n      service:\n        token: chiave_segreta\n    hosts:\n      default: powerdns\n    host_fqdn_override:\n    
  default: null\n    port:\n      powerdns_api:\n        default: 8081\n      powerdns_tcp:\n        default: 53\n      powerdns:\n        default: 53\n        protocol: UDP\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n      powerdns:\n        username: powerdns\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /powerdns\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n\nsecrets:\n  oci_image_registry:\n    powerdns: powerdns-oci-image-registry-key\n  oslo_db:\n    admin: powerdns-db-admin\n    powerdns: powerdns-db-user\n\nconf:\n  powerdns:\n    slave: true\n    dnsupdate: true\n    api: true\n    cache_ttl: 0\n    query_cache_ttl: 0\n    negquery_cache_ttl: 0\n    out_of_zone_additional_processing: no\n    webserver: true\n    webserver_address: 0.0.0.0\n    webserver_allow_from: 0.0.0.0/0\n    gmysql_dbname: powerdns\n    gmysql_dnssec: yes\n  mysql:\n    client:\n      database: powerdns\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  deployment: true\n  job_db_init: true\n  job_db_sync: true\n  secret_db: true\n  secret_registry: true\n  service_dns: true\n  service_api: false\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: 
client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "prometheus/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v2.25.0\ndescription: OpenStack-Helm Prometheus\nname: prometheus\nversion: 2025.2.0\nhome: https://prometheus.io/\nsources:\n  - https://github.com/prometheus/prometheus\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "prometheus/templates/bin/_apache.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ev\n\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n\n  if [ -f /etc/apache2/envvars ]; then\n     # Loading Apache2 ENV variables\n     source /etc/httpd/apache2/envvars\n  fi\n  # Apache gets grumpy about PID files pre-existing\n  rm -f /etc/httpd/logs/httpd.pid\n\n  if [ -f /usr/local/apache2/conf/.htpasswd ]; then\n    htpasswd -b /usr/local/apache2/conf/.htpasswd \"$PROMETHEUS_ADMIN_USERNAME\" \"$PROMETHEUS_ADMIN_PASSWORD\"\n  else\n    htpasswd -cb /usr/local/apache2/conf/.htpasswd \"$PROMETHEUS_ADMIN_USERNAME\" \"$PROMETHEUS_ADMIN_PASSWORD\"\n  fi\n\n  if [ -n \"$PROMETHEUS_FEDERATE_USERNAME\" ]; then\n    htpasswd -b /usr/local/apache2/conf/.htpasswd \"$PROMETHEUS_FEDERATE_USERNAME\" \"$PROMETHEUS_FEDERATE_PASSWORD\"\n  fi\n\n  #Launch Apache on Foreground\n  exec httpd -DFOREGROUND\n}\n\nfunction stop () {\n  apachectl -k graceful-stop\n}\n\n$COMMAND\n"
  },
  {
    "path": "prometheus/templates/bin/_helm-tests.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n\nset -ex\n\nfunction endpoints_up () {\n  endpoints_result=$(curl ${CACERT_OPTION} -K- <<< \"--user ${PROMETHEUS_ADMIN_USERNAME}:${PROMETHEUS_ADMIN_PASSWORD}\" \\\n    \"${PROMETHEUS_ENDPOINT}/api/v1/query?query=up\" \\\n    | python -c \"import sys, json; print(json.load(sys.stdin)['status'])\")\n  if [ \"$endpoints_result\" = \"success\" ];\n  then\n    echo \"PASS: Endpoints successfully queried!\"\n  else\n    echo \"FAIL: Endpoints not queried!\";\n    exit 1;\n  fi\n}\n\nfunction get_targets () {\n  targets_result=$(curl ${CACERT_OPTION} -K- <<< \"--user ${PROMETHEUS_ADMIN_USERNAME}:${PROMETHEUS_ADMIN_PASSWORD}\" \\\n    \"${PROMETHEUS_ENDPOINT}/api/v1/targets\" \\\n    | python -c \"import sys, json; print(json.load(sys.stdin)['status'])\")\n  if [ \"$targets_result\" = \"success\" ];\n  then\n    echo \"PASS: Targets successfully queried!\"\n  else\n    echo \"FAIL: Endpoints not queried!\";\n    exit 1;\n  fi\n}\n\nfunction get_alertmanagers () {\n  alertmanager=$(curl ${CACERT_OPTION} -K- <<< \"--user ${PROMETHEUS_ADMIN_USERNAME}:${PROMETHEUS_ADMIN_PASSWORD}\" \\\n    \"${PROMETHEUS_ENDPOINT}/api/v1/alertmanagers\" \\\n    |  python -c \"import sys, json; print(json.load(sys.stdin)['status'])\")\n  if [ \"$alertmanager\" = \"success\" ];\n  then\n    echo \"PASS: Alertmanager successfully queried!\"\n  else\n    echo \"FAIL: Alertmanager not queried!\";\n    exit 1;\n  
fi\n}\n\nendpoints_up\nget_targets\nget_alertmanagers\n"
  },
  {
    "path": "prometheus/templates/bin/_prometheus.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n# Two ways how to launch init process in container: by default and custom (defined in override values).\n{{ $deflaunch := .Values.proc_launch.prometheus.default }}\nif [ \"{{ $deflaunch }}\" = true ]\nthen\n  COMMAND=\"${@:-start}\"\n\n  function start () {\n  {{ $flags := include \"prometheus.utils.command_line_flags\" .Values.conf.prometheus.command_line_flags }}\n    exec /bin/prometheus --config.file=/etc/config/prometheus.yml {{ $flags }}\n  }\n\n  function stop () {\n    kill -TERM 1\n  }\n\n  $COMMAND\nelse\n  {{ tpl (.Values.proc_launch.prometheus.custom_launch) . }}\nfi\n"
  },
  {
    "path": "prometheus/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.certificates -}}\n{{  dict \"envAll\" . \"service\" \"monitoring\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "prometheus/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"prometheus-bin\" | quote }}\ndata:\n  apache.sh: |\n{{ tuple \"bin/_apache.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  prometheus.sh: |\n{{ tuple \"bin/_prometheus.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  helm-tests.sh: |\n{{ tuple \"bin/_helm-tests.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"prometheus-etc\" | quote }}\ntype: Opaque\ndata:\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.prometheus.scrape_configs.template \"key\" \"prometheus.yml\" \"format\" \"Secret\") | indent 2 }}\n{{ range $key, $value := .Values.conf.prometheus.rules }}\n  {{ $key }}.rules: {{ toYaml $value | b64enc }}\n{{ end }}\n  # NOTE(srwilkers): this must be last, to work around a Helm ~2.7 bug.\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.httpd \"key\" \"httpd.conf\" \"format\" \"Secret\") | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "prometheus/templates/ingress-prometheus.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress .Values.network.prometheus.ingress.public }}\n{{- $envAll := . -}}\n{{- $port := tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"prometheus\" \"backendServiceType\" \"monitoring\" \"backendPort\" $port -}}\n{{- $secretName := $envAll.Values.secrets.tls.monitoring.prometheus.internal -}}\n{{- if and .Values.manifests.certificates $secretName -}}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.monitoring.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "prometheus/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"prometheus\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "prometheus/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"prometheus\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "prometheus/templates/pod-helm-tests.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.helm_tests }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := print .Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{.Release.Name}}-test\"\n  labels:\n{{ tuple $envAll \"prometheus\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"prometheus-test\" \"containerNames\" (list \"init\" \"prometheus-helm-tests\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\n    \"helm.sh/hook\": test-success\nspec:\n{{ dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  restartPolicy: Never\n  initContainers:\n{{ tuple $envAll \"tests\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: prometheus-helm-tests\n{{ tuple $envAll \"helm_tests\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll 
$envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"prometheus_helm_tests\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      command:\n        - /tmp/helm-tests.sh\n      env:\n        - name: PROMETHEUS_ADMIN_USERNAME\n          valueFrom:\n            secretKeyRef:\n              name: {{ printf \"%s-%s\" $envAll.Release.Name \"admin-user\" | quote }}\n              key: PROMETHEUS_ADMIN_USERNAME\n        - name: PROMETHEUS_ADMIN_PASSWORD\n          valueFrom:\n            secretKeyRef:\n              name: {{ printf \"%s-%s\" $envAll.Release.Name \"admin-user\" | quote }}\n              key: PROMETHEUS_ADMIN_PASSWORD\n\n{{- if .Values.manifests.certificates }}\n        - name: CACERT_OPTION\n          value: \"--cacert /etc/prometheus/certs/ca.crt\"\n{{- end }}\n        - name: PROMETHEUS_ENDPOINT\n          value: {{ printf \"%s://%s\" (tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\") (tuple \"monitoring\" \"internal\" . 
| include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\") }}\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: prometheus-bin\n          mountPath: /tmp/helm-tests.sh\n          subPath: helm-tests.sh\n          readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.monitoring.prometheus.internal \"path\" \"/etc/prometheus/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 8 }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: prometheus-bin\n      configMap:\n        name: {{ printf \"%s-%s\" $envAll.Release.Name \"prometheus-bin\" | quote }}\n        defaultMode: 0555\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.monitoring.prometheus.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"monitoring\" \"backendService\" \"prometheus\" ) }}\n{{- end }}\n"
  },
  {
    "path": "prometheus/templates/secret-prometheus.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_prometheus }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"admin-user\" | quote }}\ntype: Opaque\ndata:\n  PROMETHEUS_ADMIN_USERNAME: {{ .Values.endpoints.monitoring.auth.admin.username | b64enc }}\n  PROMETHEUS_ADMIN_PASSWORD: {{ .Values.endpoints.monitoring.auth.admin.password | b64enc }}\n  PROMETHEUS_FEDERATE_USERNAME: {{ .Values.endpoints.monitoring.auth.federate.username | b64enc }}\n  PROMETHEUS_FEDERATE_PASSWORD: {{ .Values.endpoints.monitoring.auth.federate.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "prometheus/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "prometheus/templates/secret-tls-configs.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.tls_configs }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ .Release.Name }}-tls-configs\ndata:\n{{- range $k, $v := .Values.tls_configs }}\n{{- range $f, $c := $v }}\n  {{ $k }}.{{ $f }}: {{ $c | b64enc }}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "prometheus/templates/service-ingress-prometheus.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress .Values.network.prometheus.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"monitoring\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "prometheus/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.prometheus }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"monitoring\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  labels:\n{{ tuple $envAll \"prometheus\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{- if .Values.monitoring.prometheus.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n{{- end }}\nspec:\n  ports:\n  - name: {{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\n    port: {{ tuple \"monitoring\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    targetPort: {{ tuple \"monitoring\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.prometheus.node_port.enabled }}\n    nodePort: {{ .Values.network.prometheus.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"prometheus\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.prometheus.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "prometheus/templates/statefulset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"probeTemplate\" }}\n{{- $probePort := tuple \"monitoring\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $probeUser := .Values.endpoints.monitoring.auth.admin.username }}\n{{- $probePass := .Values.endpoints.monitoring.auth.admin.password }}\n{{- $authHeader := printf \"%s:%s\" $probeUser $probePass | b64enc }}\nhttpGet:\n  path: /-/ready\n  scheme: {{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  port: {{ $probePort }}\n  httpHeaders:\n    - name: Authorization\n      value: Basic {{ $authHeader }}\n{{- end }}\n\n\n{{- if .Values.manifests.statefulset_prometheus }}\n{{- $envAll := . 
}}\n\n{{- $mounts_prometheus := .Values.pod.mounts.prometheus.prometheus }}\n{{- $mounts_prometheus_init := .Values.pod.mounts.prometheus.init_container }}\n\n{{- $rcControllerName := printf \"%s-%s\" $envAll.Release.Name \"prometheus\" }}\n{{ tuple $envAll \"prometheus\" $rcControllerName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ $rcControllerName | quote }}\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes\n      - nodes/proxy\n      - services\n      - endpoints\n      - pods\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n    verbs:\n      - get\n  - nonResourceURLs:\n      - \"/metrics\"\n    verbs:\n      - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $rcControllerName | quote }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $rcControllerName | quote }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $rcControllerName | quote }}\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: {{ $rcControllerName | quote }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"prometheus\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: {{ tuple \"monitoring\" \"internal\" . 
| include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  podManagementPolicy: \"Parallel\"\n  replicas: {{ .Values.pod.replicas.prometheus }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"prometheus\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"prometheus\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"prometheus\" \"containerNames\" (list \"prometheus\" \"prometheus-perms\" \"apache-proxy\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $rcControllerName | quote }}\n      affinity:\n{{ tuple $envAll \"prometheus\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.prometheus.node_selector_key }}: {{ .Values.labels.prometheus.node_selector_value | quote }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.prometheus.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"prometheus\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: prometheus-perms\n{{ tuple $envAll \"prometheus\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.prometheus | include 
\"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"api\" \"container\" \"prometheus_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - \"nobody:\"\n            - /var/lib/prometheus/data\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: storage\n              mountPath: /var/lib/prometheus/data\n      containers:\n        - name: apache-proxy\n{{ tuple $envAll \"apache_proxy\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.apache_proxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"api\" \"container\" \"apache_proxy\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/apache.sh\n            - start\n          ports:\n            - name: {{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\n              containerPort: {{ tuple \"monitoring\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          env:\n            - name: PROMETHEUS_PORT\n              value: {{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: PROMETHEUS_ADMIN_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ printf \"%s-%s\" $envAll.Release.Name \"admin-user\" | quote }}\n                  key: PROMETHEUS_ADMIN_USERNAME\n            - name: PROMETHEUS_ADMIN_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ printf \"%s-%s\" $envAll.Release.Name \"admin-user\" | quote }}\n                  key: PROMETHEUS_ADMIN_PASSWORD\n            - name: PROMETHEUS_FEDERATE_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ printf \"%s-%s\" $envAll.Release.Name \"admin-user\" | quote }}\n                  key: PROMETHEUS_FEDERATE_USERNAME\n            - name: PROMETHEUS_FEDERATE_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ printf \"%s-%s\" $envAll.Release.Name \"admin-user\" | quote }}\n                  key: PROMETHEUS_FEDERATE_PASSWORD\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: prometheus-bin\n              mountPath: /tmp/apache.sh\n              subPath: apache.sh\n              readOnly: true\n            - name: prometheus-etc\n              mountPath: /usr/local/apache2/conf/httpd.conf\n              subPath: httpd.conf\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.monitoring.prometheus.internal \"path\" \"/etc/prometheus/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n        - name: prometheus\n{{ tuple $envAll \"prometheus\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.prometheus | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"api\" \"container\" \"prometheus\" | 
include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/prometheus.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/prometheus.sh\n                  - stop\n          ports:\n            - name: prom-metrics\n              containerPort: {{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" . \"component\" \"prometheus\" \"container\" \"prometheus\" \"type\" \"readiness\" \"probeTemplate\" (include \"probeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" . \"component\" \"prometheus\" \"container\" \"prometheus\" \"type\" \"liveness\" \"probeTemplate\" (include \"probeTemplate\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          env:\n{{- if .Values.pod.env.prometheus }}\n{{ include \"helm-toolkit.utils.to_k8s_env_vars\" .Values.pod.env.prometheus | indent 12 }}\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etcprometheus\n              mountPath: /etc/config\n            - name: rulesprometheus\n              mountPath: /etc/config/rules\n            {{- range $key, $value := .Values.conf.prometheus.rules }}\n            - name: prometheus-etc\n              mountPath: /etc/config/rules/{{ $key }}.rules\n              subPath: {{ $key }}.rules\n              readOnly: true\n            {{- end }}\n            - name: prometheus-etc\n              mountPath: /etc/config/prometheus.yml\n              subPath: prometheus.yml\n              readOnly: true\n            - name: prometheus-bin\n              mountPath: /tmp/prometheus.sh\n              subPath: prometheus.sh\n              readOnly: true\n            - name: storage\n              mountPath: 
/var/lib/prometheus/data\n{{- if .Values.tls_configs }}\n            - name: tls-configs\n              mountPath: /tls_configs\n{{- end }}\n{{ if $mounts_prometheus.volumeMounts }}{{ toYaml $mounts_prometheus.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: etcprometheus\n          emptyDir: {}\n        - name: rulesprometheus\n          emptyDir: {}\n        - name: prometheus-etc\n          secret:\n            secretName: {{ printf \"%s-%s\" $envAll.Release.Name \"prometheus-etc\" | quote }}\n            defaultMode: 0444\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.monitoring.prometheus.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n        - name: prometheus-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"prometheus-bin\" | quote }}\n            defaultMode: 0555\n{{- if .Values.tls_configs }}\n        - name: tls-configs\n          secret:\n            secretName: {{ printf \"%s-%s\" $envAll.Release.Name \"tls-configs\" | quote }}\n            defaultMode: 0444\n{{- end }}\n{{ if $mounts_prometheus.volumes }}{{ toYaml $mounts_prometheus.volumes | indent 8 }}{{ end }}\n{{- if not .Values.storage.enabled }}\n{{- if .Values.storage.use_local_path.enabled }}\n        - name: storage\n          hostPath:\n            path: {{ .Values.storage.use_local_path.host_path }}\n            type: DirectoryOrCreate\n{{- else }}\n        - name: storage\n          emptyDir: {}\n{{- end }}\n{{- else }}\n  volumeClaimTemplates:\n    - metadata:\n        name: storage\n      spec:\n        accessModes: {{ .Values.storage.pvc.access_mode }}\n        resources:\n          requests:\n            storage: {{ .Values.storage.requests.storage  }}\n        storageClassName: {{ .Values.storage.storage_class }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "prometheus/templates/utils/_command_line_flags.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# This function generates the command line flags passed to Prometheus at time of\n# execution. This allows the Prometheus service configuration to be flexible, as\n# the only way to define Prometheus's configuration is via command line flags.\n# The yaml definition for these flags uses the full yaml path as the key, and\n# replaces underscores with hyphens to match the syntax required for the flags\n# generated (This is required due to Go's yaml parsing capabilities).\n# For example:\n#\n# conf:\n#   prometheus:\n#     command_line_flags:\n#       storage.tsdb.max_block_duration: 2h\n#\n# Will generate the following flag:\n#   --storage.tsdb.max-block-duration=2h\n#\n# Prometheus's command flags can be found by either running 'prometheus -h' or\n# 'prometheus --help-man'\n\n{{- define \"prometheus.utils.command_line_flags\" -}}\n{{- range $flag, $value := . -}}\n{{- $flag := $flag | replace \"_\" \"-\" }}\n{{- if eq $flag \"web.enable-admin-api\" \"web.enable-lifecycle\" \"storage.tsdb.wal-compression\" -}}\n{{- if $value }}\n{{- printf \" --%s \" $flag -}}\n{{- end -}}\n{{- else -}}\n{{- $value := $value | toString }}\n{{- printf \" --%s=%s \" $flag $value }}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "prometheus/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for prometheus.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nimages:\n  tags:\n    apache_proxy: docker.io/library/httpd:2.4\n    prometheus: docker.io/prom/prometheus:v2.25.0\n    helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  prometheus:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\npod:\n  env:\n    prometheus: null\n  security_context:\n    api:\n      pod:\n        runAsUser: 65534\n      container:\n        prometheus_perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: false\n        apache_proxy:\n          runAsUser: 0\n          readOnlyRootFilesystem: false\n        prometheus:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    test:\n      pod:\n        runAsUser: 65534\n      container:\n        prometheus_helm_tests:\n          
readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  mounts:\n    prometheus:\n      prometheus:\n      init_container: null\n  replicas:\n    prometheus: 1\n  lifecycle:\n    upgrades:\n      statefulsets:\n        pod_replacement_strategy: RollingUpdate\n    termination_grace_period:\n      prometheus:\n        timeout: 30\n  resources:\n    enabled: false\n    prometheus:\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n      requests:\n        memory: \"128Mi\"\n        cpu: \"500m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  probes:\n    prometheus:\n      prometheus:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n        liveness:\n          enabled: false\n          params:\n            initialDelaySeconds: 120\n            timeoutSeconds: 30\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      prometheus:\n        username: prometheus\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    
port:\n      registry:\n        default: null\n  monitoring:\n    name: prometheus\n    namespace: null\n    auth:\n      admin:\n        username: admin\n        password: changeme\n      federate:\n        username: federate\n        password: changeme\n    hosts:\n      default: prom-metrics\n      public: prometheus\n    host_fqdn_override:\n      default: null\n      # NOTE(srwilkers): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 9090\n      http:\n        default: 80\n  alertmanager:\n    name: prometheus-alertmanager\n    namespace: null\n    hosts:\n      default: alerts-engine\n      public: prometheus-alertmanager\n      discovery: prometheus-alertmanager-discovery\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 9093\n        public: 80\n      mesh:\n        default: 9094\n  ldap:\n    hosts:\n      default: ldap\n    auth:\n      admin:\n        bind: \"cn=admin,dc=cluster,dc=local\"\n        password: password\n    host_fqdn_override:\n      default: null\n    path:\n      default: \"/ou=People,dc=cluster,dc=local\"\n    scheme:\n      default: ldap\n    port:\n      ldap:\n        default: 389\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - prometheus-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    prometheus:\n      services: null\n    tests:\n      services:\n        - endpoint: internal\n          service: monitoring\n\nmonitoring:\n  prometheus:\n    
enabled: true\n    prometheus:\n      scrape: true\n\nnetwork:\n  prometheus:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        nginx.ingress.kubernetes.io/affinity: cookie\n        nginx.ingress.kubernetes.io/session-cookie-name: kube-ingress-session-prometheus\n        nginx.ingress.kubernetes.io/session-cookie-hash: sha1\n        nginx.ingress.kubernetes.io/session-cookie-expires: \"600\"\n        nginx.ingress.kubernetes.io/session-cookie-max-age: \"600\"\n        haproxy.org/path-rewrite: /\n        haproxy.org/cookie-persistence: \"kube-ingress-session-prometheus\"\n    node_port:\n      enabled: false\n      port: 30900\n\nnetwork_policy:\n  prometheus:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nproc_launch:\n  prometheus:\n    default: true\n    custom_launch: |\n        while true\n        do\n          echo \"If 'proc_launch.prometheus.default: false'.\"\n          echo \"Your custom shell script code you can put here.\"\n          sleep 10\n        done\n\nsecrets:\n  oci_image_registry:\n    prometheus: prometheus-oci-image-registry-key\n  tls:\n    monitoring:\n      prometheus:\n        public: prometheus-tls-public\n        internal: prometheus-tls-api\n\ntls_configs:\n  # If client certificates are required to connect to metrics endpoints, they\n  # can be configured here. 
They will be mounted in the pod under /tls_configs\n  # and can be referenced in scrape configs.\n  # The filenames will be the key and subkey concatenated with a \".\", e.g.:\n  #   /tls_configs/kubernetes-etcd.ca.pem\n  #   /tls_configs/kubernetes-etcd.crt.pem\n  #   /tls_configs/kubernetes-etcd.key.pem\n  # From the following:\n  # kubernetes-etcd:\n  #   ca.pem: |\n  #     -----BEGIN CERTIFICATE-----\n  #     -----END CERTIFICATE-----\n  #   crt.pem: |\n  #     -----BEGIN CERTIFICATE-----\n  #     -----END CERTIFICATE-----\n  #   key.pem: |\n  #     -----BEGIN RSA PRIVATE KEY-----\n  #     -----END RSA PRIVATE KEY-----\n\nstorage:\n  enabled: true\n  pvc:\n    name: prometheus-pvc\n    access_mode: [\"ReadWriteOnce\"]\n  requests:\n    storage: 5Gi\n  storage_class: general\n  use_local_path:\n    enabled: false\n    host_path: /var/lib/prometheus-data\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  ingress: true\n  helm_tests: true\n  job_image_repo_sync: true\n  network_policy: true\n  secret_ingress_tls: true\n  secret_prometheus: true\n  secret_registry: true\n  service_ingress: true\n  service: true\n  statefulset_prometheus: true\n\nconf:\n  httpd: |\n    ServerRoot \"/usr/local/apache2\"\n\n    Listen 80\n\n    LoadModule mpm_event_module modules/mod_mpm_event.so\n    LoadModule authn_file_module modules/mod_authn_file.so\n    LoadModule authn_core_module modules/mod_authn_core.so\n    LoadModule authz_host_module modules/mod_authz_host.so\n    LoadModule authz_groupfile_module modules/mod_authz_groupfile.so\n    LoadModule authz_user_module modules/mod_authz_user.so\n    LoadModule authz_core_module modules/mod_authz_core.so\n    LoadModule access_compat_module modules/mod_access_compat.so\n    LoadModule auth_basic_module modules/mod_auth_basic.so\n    LoadModule ldap_module modules/mod_ldap.so\n    LoadModule authnz_ldap_module modules/mod_authnz_ldap.so\n    LoadModule reqtimeout_module 
modules/mod_reqtimeout.so\n    LoadModule filter_module modules/mod_filter.so\n    LoadModule proxy_html_module modules/mod_proxy_html.so\n    LoadModule log_config_module modules/mod_log_config.so\n    LoadModule env_module modules/mod_env.so\n    LoadModule headers_module modules/mod_headers.so\n    LoadModule setenvif_module modules/mod_setenvif.so\n    LoadModule version_module modules/mod_version.so\n    LoadModule proxy_module modules/mod_proxy.so\n    LoadModule proxy_connect_module modules/mod_proxy_connect.so\n    LoadModule proxy_http_module modules/mod_proxy_http.so\n    LoadModule proxy_balancer_module modules/mod_proxy_balancer.so\n    LoadModule slotmem_shm_module modules/mod_slotmem_shm.so\n    LoadModule slotmem_plain_module modules/mod_slotmem_plain.so\n    LoadModule unixd_module modules/mod_unixd.so\n    LoadModule status_module modules/mod_status.so\n    LoadModule autoindex_module modules/mod_autoindex.so\n\n    <IfModule unixd_module>\n    User daemon\n    Group daemon\n    </IfModule>\n\n    <Directory />\n        AllowOverride none\n        Require all denied\n    </Directory>\n\n    <Files \".ht*\">\n        Require all denied\n    </Files>\n\n    ErrorLog /dev/stderr\n\n    LogLevel warn\n\n    <IfModule log_config_module>\n        LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" combined\n        LogFormat \"%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" proxy\n        LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b\" common\n\n        <IfModule logio_module>\n          LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\" %I %O\" combinedio\n        </IfModule>\n\n        SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n        CustomLog /dev/stdout common\n        CustomLog /dev/stdout combined\n        CustomLog /dev/stdout proxy env=forwarded\n    </IfModule>\n\n    <Directory \"/usr/local/apache2/cgi-bin\">\n        
AllowOverride None\n        Options None\n        Require all granted\n    </Directory>\n\n    <IfModule headers_module>\n        RequestHeader unset Proxy early\n    </IfModule>\n\n    <IfModule proxy_html_module>\n    Include conf/extra/proxy-html.conf\n    </IfModule>\n\n    <VirtualHost *:80>\n      # Expose metrics to all users, as this is not sensitive information and\n      # circumvents the inability of Prometheus to interpolate environment vars\n      # in its configuration file\n      <Location /metrics>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/metrics\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/metrics\n          Satisfy Any\n          Allow from all\n      </Location>\n      # Expose the /federate endpoint to all users, as this is also not\n      # sensitive information and circumvents the inability of Prometheus to\n      # interpolate environment vars in its configuration file\n      <Location /federate>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/federate\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/federate\n          Satisfy Any\n          Allow from all\n      </Location>\n      # Restrict general user (LDAP) access to the /graph endpoint, as general trusted\n      # users should only be able to query Prometheus for metrics and not have access\n      # to information like targets, configuration, flags or build info for Prometheus\n      <Location />\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file ldap\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}\n          AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}\n          AuthLDAPURL {{ tuple \"ldap\" \"default\" \"ldap\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | quote }}\n          Require valid-user\n      </Location>\n      <Location /graph>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/graph\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/graph\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file ldap\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}\n          AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}\n          AuthLDAPURL {{ tuple \"ldap\" \"default\" \"ldap\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | quote }}\n          Require valid-user\n      </Location>\n      # Restrict access to the /config (dashboard) and /api/v1/status/config (http) endpoints\n      # to the admin user\n      <Location /config>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/config\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/config\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      <Location /api/v1/status/config>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/status/config\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/status/config\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      # Restrict access to the /flags (dashboard) and /api/v1/status/flags (http) endpoints\n      # to the admin user\n      <Location /flags>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/flags\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/flags\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      <Location /api/v1/status/flags>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/status/flags\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/status/flags\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      # Restrict access to the /status (dashboard) endpoint to the admin user\n      <Location /status>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/status\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/status\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      # Restrict access to the /rules (dashboard) endpoint to the admin user\n      <Location /rules>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/rules\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/rules\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      # Restrict access to the /targets (dashboard) and /api/v1/targets (http) endpoints\n      # to the admin user\n      <Location /targets>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/targets\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/targets\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      <Location /api/v1/targets>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/targets\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/targets\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      # Restrict access to the /api/v1/admin/tsdb/ endpoints (http) to the admin user.\n      # These endpoints are disabled by default, but are included here to ensure only\n      # an admin user has access to these endpoints when enabled\n      <Location /api/v1/admin/tsdb/>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/admin/tsdb/\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/admin/tsdb/\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n    </VirtualHost>\n  prometheus:\n    # Consumed by a prometheus helper function to generate the command line flags\n    # for configuring the prometheus service\n    command_line_flags:\n      log.level: info\n      query.max_concurrency: 20\n      query.timeout: 2m\n      storage.tsdb.path: /var/lib/prometheus/data\n      storage.tsdb.retention.time: 7d\n      # NOTE(srwilkers): These settings default to false, but they are\n      # exposed here to allow enabling if desired. Please note the security\n      # impacts of enabling these flags. More information regarding the impacts\n      # can be found here: https://prometheus.io/docs/operating/security/\n      #\n      # If set to true, all administrative functionality is exposed via the http\n      # /api/*/admin/ path\n      web.enable_admin_api: false\n      # If set to true, allows for http reloads and shutdown of Prometheus\n      web.enable_lifecycle: false\n    scrape_configs:\n      template: |\n        {{- $promHost := tuple \"monitoring\" \"public\" . 
| include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n        {{- if not (empty .Values.conf.prometheus.rules)}}\n        rule_files:\n        {{- $rulesKeys := keys .Values.conf.prometheus.rules -}}\n        {{- range $rule := $rulesKeys }}\n          {{ printf \"- /etc/config/rules/%s.rules\" $rule }}\n        {{- end }}\n        {{- end }}\n        global:\n          scrape_interval: 60s\n          evaluation_interval: 60s\n          external_labels:\n            prometheus_host: {{$promHost}}\n        scrape_configs:\n          - job_name: kubelet\n            scheme: https\n            # This TLS & bearer token file config is used to connect to the actual scrape\n            # endpoints for cluster components. This is separate to discovery auth\n            # configuration because discovery & scraping are two separate concerns in\n            # Prometheus. The discovery auth config is automatic if Prometheus runs inside\n            # the cluster. Otherwise, more config options have to be provided within the\n            # <kubernetes_sd_config>.\n            tls_config:\n              ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n            bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n            kubernetes_sd_configs:\n            - role: node\n            scrape_interval: 45s\n            relabel_configs:\n            - action: labelmap\n              regex: __meta_kubernetes_node_label_(.+)\n            - target_label: __address__\n              replacement: kubernetes.default.svc:443\n            - source_labels:\n                - __meta_kubernetes_node_name\n              regex: (.+)\n              target_label: __metrics_path__\n              replacement: /api/v1/nodes/${1}/proxy/metrics\n            - source_labels:\n                - __meta_kubernetes_node_name\n              action: replace\n              target_label: kubernetes_io_hostname\n            # Scrape config for Kubelet 
cAdvisor.\n            #\n            # This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics\n            # (those whose names begin with 'container_') have been removed from the\n            # Kubelet metrics endpoint.  This job scrapes the cAdvisor endpoint to\n            # retrieve those metrics.\n            #\n            # In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor\n            # HTTP endpoint; use \"replacement: /api/v1/nodes/${1}:4194/proxy/metrics\"\n            # in that case (and ensure cAdvisor's HTTP server hasn't been disabled with\n            # the --cadvisor-port=0 Kubelet flag).\n            #\n            # This job is not necessary and should be removed in Kubernetes 1.6 and\n            # earlier versions, or it will cause the metrics to be scraped twice.\n          - job_name: 'kubernetes-cadvisor'\n\n            # Default to scraping over https. If required, just disable this or change to\n            # `http`.\n            scheme: https\n\n            # This TLS & bearer token file config is used to connect to the actual scrape\n            # endpoints for cluster components. This is separate to discovery auth\n            # configuration because discovery & scraping are two separate concerns in\n            # Prometheus. The discovery auth config is automatic if Prometheus runs inside\n            # the cluster. 
Otherwise, more config options have to be provided within the\n            # <kubernetes_sd_config>.\n            tls_config:\n              ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n            bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n\n            kubernetes_sd_configs:\n            - role: node\n\n            relabel_configs:\n            - action: labelmap\n              regex: __meta_kubernetes_node_label_(.+)\n            - target_label: __address__\n              replacement: kubernetes.default.svc:443\n            - source_labels:\n                - __meta_kubernetes_node_name\n              regex: (.+)\n              target_label: __metrics_path__\n              replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor\n            metric_relabel_configs:\n            - source_labels:\n                - __name__\n              regex: 'container_network_tcp_usage_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_tasks_state'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_network_udp_usage_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_memory_failures_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_cpu_load_average_10s'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_cpu_system_seconds_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_cpu_user_seconds_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_fs_inodes_free'\n              action: drop\n            - source_labels:\n                - 
__name__\n              regex: 'container_fs_inodes_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_fs_io_current'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_fs_io_time_seconds_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_fs_io_time_weighted_seconds_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_fs_read_seconds_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_fs_reads_merged_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_fs_reads_merged_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_fs_reads_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_fs_sector_reads_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_fs_sector_writes_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_fs_write_seconds_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_fs_writes_bytes_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_fs_writes_merged_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_fs_writes_total'\n              action: drop\n            - source_labels:\n                - __name__\n           
   regex: 'container_last_seen'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_memory_cache'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_memory_failcnt'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_memory_max_usage_bytes'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_memory_rss'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_memory_swap'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_memory_usage_bytes'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_network_receive_errors_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_network_receive_packets_dropped_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_network_receive_packets_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_network_transmit_errors_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_network_transmit_packets_dropped_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_network_transmit_packets_total'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_spec_cpu_period'\n              action: drop\n            - source_labels:\n                - __name__\n              
regex: 'container_spec_cpu_shares'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_spec_memory_limit_bytes'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_spec_memory_reservation_limit_bytes'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_spec_memory_swap_limit_bytes'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'container_start_time_seconds'\n              action: drop\n            # Scrape config for API servers.\n            #\n            # Kubernetes exposes API servers as endpoints to the default/kubernetes\n            # service so this uses `endpoints` role and uses relabelling to only keep\n            # the endpoints associated with the default/kubernetes service using the\n            # default named port `https`. This works for single API server deployments as\n            # well as HA API server deployments.\n          - job_name: 'apiserver'\n            kubernetes_sd_configs:\n            - role: endpoints\n            scrape_interval: 45s\n            # Default to scraping over https. If required, just disable this or change to\n            # `http`.\n            scheme: https\n            # This TLS & bearer token file config is used to connect to the actual scrape\n            # endpoints for cluster components. This is separate to discovery auth\n            # configuration because discovery & scraping are two separate concerns in\n            # Prometheus. The discovery auth config is automatic if Prometheus runs inside\n            # the cluster. 
Otherwise, more config options have to be provided within the\n            # <kubernetes_sd_config>.\n            tls_config:\n              ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n              # If your node certificates are self-signed or use a different CA to the\n              # master CA, then disable certificate verification below. Note that\n              # certificate verification is an integral part of a secure infrastructure\n              # so this should only be disabled in a controlled environment. You can\n              # disable certificate verification by uncommenting the line below.\n              #\n              # insecure_skip_verify: true\n            bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n            # Keep only the default/kubernetes service endpoints for the https port. This\n            # will add targets for each API server which Kubernetes adds an endpoint to\n            # the default/kubernetes service.\n            relabel_configs:\n            - source_labels:\n                - __meta_kubernetes_namespace\n                - __meta_kubernetes_service_name\n                - __meta_kubernetes_endpoint_port_name\n              action: keep\n              regex: default;kubernetes;https\n            metric_relabel_configs:\n            - source_labels:\n                - __name__\n              regex: 'apiserver_admission_controller_admission_latencies_seconds_bucket'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'rest_client_request_latency_seconds_bucket'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'apiserver_response_sizes_bucket'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'apiserver_admission_step_admission_latencies_seconds_bucket'\n              action: drop\n            - 
source_labels:\n                - __name__\n              regex: 'apiserver_admission_controller_admission_latencies_seconds_count'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'apiserver_admission_controller_admission_latencies_seconds_sum'\n              action: drop\n            - source_labels:\n                - __name__\n              regex: 'apiserver_request_latencies_summary'\n              action: drop\n          # Scrape config for service endpoints.\n          #\n          # The relabeling allows the actual service scrape endpoint to be configured\n          # via the following annotations:\n          #\n          # * `prometheus.io/scrape`: Only scrape services that have a value of `true`\n          # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need\n          # to set this to `https` & most likely set the `tls_config` of the scrape config.\n          # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.\n          # * `prometheus.io/port`: If the metrics are exposed on a different port to the\n          # service then set this appropriately.\n          - job_name: 'openstack-exporter'\n            kubernetes_sd_configs:\n            - role: endpoints\n            scrape_interval: 60s\n            relabel_configs:\n            - source_labels:\n                - __meta_kubernetes_service_name\n              action: keep\n              regex: \"openstack-metrics\"\n            - source_labels:\n                - __meta_kubernetes_service_annotation_prometheus_io_scrape\n              action: keep\n              regex: true\n            - source_labels:\n                - __meta_kubernetes_service_annotation_prometheus_io_scheme\n              action: replace\n              target_label: __scheme__\n              regex: (https?)\n            - source_labels:\n                - __meta_kubernetes_service_annotation_prometheus_io_path\n     
         action: replace\n              target_label: __metrics_path__\n              regex: (.+)\n            - source_labels:\n                - __address__\n                - __meta_kubernetes_service_annotation_prometheus_io_port\n              action: replace\n              target_label: __address__\n              regex: ([^:]+)(?::\\d+)?;(\\d+)\n              replacement: $1:$2\n            - action: labelmap\n              regex: __meta_kubernetes_service_label_(.+)\n            - source_labels:\n                - __meta_kubernetes_namespace\n              action: replace\n              target_label: kubernetes_namespace\n            - source_labels:\n                - __meta_kubernetes_service_name\n              action: replace\n              target_label: instance\n            - source_labels:\n                - __meta_kubernetes_service_name\n              action: replace\n              target_label: kubernetes_name\n            - source_labels:\n                - __meta_kubernetes_service_name\n              target_label: job\n              replacement: ${1}\n          - job_name: 'node-exporter'\n            kubernetes_sd_configs:\n            - role: endpoints\n            scrape_interval: 60s\n            relabel_configs:\n            - source_labels:\n                - __meta_kubernetes_service_name\n              action: keep\n              regex: 'node-exporter'\n            - source_labels:\n                - __meta_kubernetes_pod_node_name\n              action: replace\n              target_label: hostname\n          - job_name: 'kubernetes-service-endpoints'\n            kubernetes_sd_configs:\n            - role: endpoints\n            scrape_interval: 60s\n            relabel_configs:\n            - source_labels:\n                - __meta_kubernetes_service_name\n              action: drop\n              regex: '(openstack-metrics|prom-metrics|ceph-mgr|node-exporter)'\n            - source_labels:\n                - 
__meta_kubernetes_service_annotation_prometheus_io_scrape\n              action: keep\n              regex: true\n            - source_labels:\n                - __meta_kubernetes_service_annotation_prometheus_io_scheme\n              action: replace\n              target_label: __scheme__\n              regex: (https?)\n            - source_labels:\n                - __meta_kubernetes_service_annotation_prometheus_io_path\n              action: replace\n              target_label: __metrics_path__\n              regex: (.+)\n            - source_labels:\n                - __address__\n                - __meta_kubernetes_service_annotation_prometheus_io_port\n              action: replace\n              target_label: __address__\n              regex: ([^:]+)(?::\\d+)?;(\\d+)\n              replacement: $1:$2\n            - action: labelmap\n              regex: __meta_kubernetes_service_label_(.+)\n            - source_labels:\n                - __meta_kubernetes_namespace\n              action: replace\n              target_label: kubernetes_namespace\n            - source_labels:\n                - __meta_kubernetes_service_name\n              action: replace\n              target_label: kubernetes_name\n            - source_labels:\n                - __meta_kubernetes_service_name\n              target_label: job\n              replacement: ${1}\n          # Example scrape config for pods\n          #\n          # The relabeling allows the actual pod scrape endpoint to be configured via the\n          # following annotations:\n          #\n          # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`\n          # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.\n          # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the\n          # pod's declared ports (default is a port-free target if none are declared).\n          - job_name: 'kubernetes-pods'\n            kubernetes_sd_configs:\n   
         - role: pod\n            relabel_configs:\n            - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]\n              action: keep\n              regex: true\n            - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]\n              action: replace\n              target_label: __metrics_path__\n              regex: (.+)\n            - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]\n              action: replace\n              regex: ([^:]+)(?::\\d+)?;(\\d+)\n              replacement: $1:$2\n              target_label: __address__\n            - action: labelmap\n              regex: __meta_kubernetes_pod_label_(.+)\n            - source_labels: [__meta_kubernetes_namespace]\n              action: replace\n              target_label: kubernetes_namespace\n            - source_labels: [__meta_kubernetes_pod_name]\n              action: replace\n              target_label: kubernetes_pod_name\n          - job_name: calico-etcd\n            kubernetes_sd_configs:\n            - role: service\n            scrape_interval: 20s\n            relabel_configs:\n            - action: labelmap\n              regex: __meta_kubernetes_service_label_(.+)\n            - action: keep\n              source_labels:\n                - __meta_kubernetes_service_name\n              regex: \"calico-etcd\"\n            - action: keep\n              source_labels:\n                - __meta_kubernetes_namespace\n              regex: kube-system\n              target_label: namespace\n            - source_labels:\n                - __meta_kubernetes_pod_name\n              target_label: pod\n            - source_labels:\n                - __meta_kubernetes_service_name\n              target_label: service\n            - source_labels:\n                - __meta_kubernetes_service_name\n              target_label: job\n              replacement: ${1}\n            - source_labels:\n                - 
__meta_kubernetes_service_label\n              target_label: job\n              regex: calico-etcd\n              replacement: ${1}\n            - target_label: endpoint\n              replacement: \"calico-etcd\"\n          - job_name: ceph-mgr\n            kubernetes_sd_configs:\n            - role: service\n            scrape_interval: 20s\n            relabel_configs:\n            - action: labelmap\n              regex: __meta_kubernetes_service_label_(.+)\n            - action: keep\n              source_labels:\n                - __meta_kubernetes_service_name\n              regex: \"ceph-mgr\"\n            - source_labels:\n                - __meta_kubernetes_service_port_name\n              action: drop\n              regex: 'ceph-mgr'\n            - action: keep\n              source_labels:\n                - __meta_kubernetes_namespace\n              regex: ceph\n              target_label: namespace\n            - source_labels:\n                - __meta_kubernetes_pod_name\n              target_label: pod\n            - source_labels:\n                - __meta_kubernetes_service_name\n              target_label: service\n            - source_labels:\n                - __meta_kubernetes_service_name\n              target_label: job\n              replacement: ${1}\n            - source_labels:\n                - __meta_kubernetes_service_label\n              target_label: job\n              regex: ceph-mgr\n              replacement: ${1}\n            - target_label: endpoint\n              replacement: \"ceph-mgr\"\n        alerting:\n          alertmanagers:\n          - kubernetes_sd_configs:\n              - role: pod\n            tls_config:\n              ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n            bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n            relabel_configs:\n            - source_labels: [__meta_kubernetes_pod_label_application]\n              regex: prometheus-alertmanager\n  
            action: keep\n            - source_labels: [__meta_kubernetes_pod_container_port_name]\n              regex: alerts-api\n              action: keep\n            - source_labels: [__meta_kubernetes_pod_container_port_name]\n              regex: peer-mesh\n              action: drop\n    rules: []\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "prometheus-alertmanager/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v0.20.0\ndescription: OpenStack-Helm Alertmanager for Prometheus\nname: prometheus-alertmanager\nversion: 2025.2.0\nhome: https://prometheus.io/docs/alerting/alertmanager/\nsources:\n  - https://github.com/prometheus/alertmanager\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "prometheus-alertmanager/templates/bin/_alertmanager.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec /bin/alertmanager \\\n    --config.file=/etc/alertmanager/config.yml \\\n{{- range $flag, $value := .Values.conf.command_flags.alertmanager }}\n{{- $flag := $flag | replace \"_\" \"-\" }}\n{{ printf \"--%s=%s\" $flag $value | indent 4 }} \\\n{{- end }}\n    $(generate_peers)\n}\n\nfunction generate_peers () {\n  final_pod_suffix=$(( {{ .Values.pod.replicas.alertmanager }}-1 ))\n  for pod_suffix in `seq 0 \"$final_pod_suffix\"`\n  do\n    echo --cluster.peer=prometheus-alertmanager-$pod_suffix.$DISCOVERY_SVC:$MESH_PORT\n  done\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "prometheus-alertmanager/templates/bin/_apache.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -exv\n\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n\n  if [ -f /etc/apache2/envvars ]; then\n     # Loading Apache2 ENV variables\n     source /etc/apache2/envvars\n  fi\n  # Apache gets grumpy about PID files pre-existing\n  rm -f /etc/httpd/logs/httpd.pid\n\n  if [ -f /usr/local/apache2/conf/.htpasswd ]; then\n    htpasswd -b /usr/local/apache2/conf/.htpasswd \"$ALERTMANAGER_USERNAME\" \"$ALERTMANAGER_PASSWORD\"\n  else\n    htpasswd -cb /usr/local/apache2/conf/.htpasswd \"$ALERTMANAGER_USERNAME\" \"$ALERTMANAGER_PASSWORD\"\n  fi\n\n  # Launch Apache in the foreground\n  exec httpd -DFOREGROUND\n}\n\nfunction stop () {\n  apachectl -k graceful-stop\n}\n\n$COMMAND\n"
  },
  {
    "path": "prometheus-alertmanager/templates/clusterrolebinding.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.clusterrolebinding }}\n{{- $envAll := . }}\n{{- $serviceAccountName := printf \"%s\" \"prometheus-alertmanager\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: run-alertmanager\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: cluster-admin\n  apiGroup: rbac.authorization.k8s.io\n{{- end }}\n"
  },
  {
    "path": "prometheus-alertmanager/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"alertmanager-bin\" | quote }}\ndata:\n  apache.sh: |\n{{ tuple \"bin/_apache.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  alertmanager.sh: |\n{{ tuple \"bin/_alertmanager.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-alertmanager/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"alertmanager-etc\" | quote }}\ndata:\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.alertmanager \"key\" \"config.yml\" \"format\" \"Secret\") | indent 2 }}\n{{- if .Values.conf.alert_templates }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.alert_templates \"key\" \"alert-templates.tmpl\" \"format\" \"Secret\") | indent 2 }}\n{{- end }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.conf.httpd \"key\" \"httpd.conf\" \"format\" \"Secret\") | indent 2 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-alertmanager/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "prometheus-alertmanager/templates/ingress-alertmanager.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress .Values.network.alertmanager.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendService\" \"alertmanager\" \"backendServiceType\" \"alertmanager\" \"backendPort\" \"http\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-alertmanager/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"alertmanager\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-alertmanager/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License. */}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"alertmanager\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "prometheus-alertmanager/templates/secret-admin-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_admin_user }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.Release.Name \"admin-user\" | quote }}\ntype: Opaque\ndata:\n  ALERTMANAGER_USERNAME: {{ .Values.endpoints.alertmanager.auth.admin.username | b64enc }}\n  ALERTMANAGER_PASSWORD: {{ .Values.endpoints.alertmanager.auth.admin.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-alertmanager/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"alertmanager\" \"backendService\" \"alertmanager\") }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-alertmanager/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-alertmanager/templates/service-discovery.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_discovery }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"alertmanager\" \"discovery\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  type: ClusterIP\n  clusterIP: None\n  ports:\n  - name: peer-mesh\n    port: {{ tuple \"alertmanager\" \"internal\" \"mesh\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"prometheus-alertmanager\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-alertmanager/templates/service-ingress-alertmanager.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress .Values.network.alertmanager.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"alertmanager\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-alertmanager/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.prometheus }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"alertmanager\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  annotations:\n{{- if .Values.monitoring.prometheus.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n{{- end }}\nspec:\n  ports:\n  - name: http\n    port: {{ tuple \"alertmanager\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.alertmanager.node_port.enabled }}\n    nodePort: {{ .Values.network.alertmanager.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"prometheus-alertmanager\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.alertmanager.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-alertmanager/templates/statefulset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.statefulset }}\n{{- $envAll := . }}\n\n{{- $mounts_alertmanager := .Values.pod.mounts.alertmanager.alertmanager }}\n{{- $mounts_alertmanager_init := .Values.pod.mounts.alertmanager.init_container }}\n\n{{- $serviceAccountName := \"prometheus-alertmanager\" }}\n{{ tuple $envAll \"prometheus-alertmanager\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: prometheus-alertmanager\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"prometheus-alertmanager\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: {{ tuple \"alertmanager\" \"discovery\" . 
| include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  podManagementPolicy: \"Parallel\"\n  replicas: {{ .Values.pod.replicas.alertmanager }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"prometheus-alertmanager\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"prometheus-alertmanager\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" $serviceAccountName \"containerNames\" (list \"prometheus-alertmanager\" \"prometheus-alertmanager-perms\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"prometheus-alertmanager\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.alertmanager.node_selector_key }}: {{ .Values.labels.alertmanager.node_selector_value | quote }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.alertmanager.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"alertmanager\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: prometheus-alertmanager-perms\n{{ tuple $envAll \"prometheus-alertmanager\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ 
tuple $envAll $envAll.Values.pod.resources.alertmanager | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"prometheus_alertmanager_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - \"nobody:\"\n            - /var/lib/alertmanager/data\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: alertmanager-data\n              mountPath: /var/lib/alertmanager/data\n      containers:\n        - name: apache-proxy\n{{ tuple $envAll \"apache_proxy\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.apache_proxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"apache_proxy\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/apache.sh\n            - start\n          ports:\n            - name: http\n              containerPort: 80\n          env:\n            - name: ALERTMANAGER_PORT\n              value: {{ tuple \"alertmanager\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: ALERTMANAGER_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ printf \"%s-%s\" $envAll.Release.Name \"admin-user\" | quote }}\n                  key: ALERTMANAGER_USERNAME\n            - name: ALERTMANAGER_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ printf \"%s-%s\" $envAll.Release.Name \"admin-user\" | quote }}\n                  key: ALERTMANAGER_PASSWORD\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: alertmanager-bin\n              mountPath: /tmp/apache.sh\n              subPath: apache.sh\n              readOnly: true\n            - name: alertmanager-etc\n              mountPath: /usr/local/apache2/conf/httpd.conf\n              subPath: httpd.conf\n              readOnly: true\n        - name: prometheus-alertmanager\n{{ tuple $envAll \"prometheus-alertmanager\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.alertmanager | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"prometheus_alertmanager\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/alertmanager.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/alertmanager.sh\n                  - stop\n          env:\n            - name: DISCOVERY_SVC\n              value: {{ tuple \"alertmanager\" \"discovery\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n            - name: MESH_PORT\n              value: {{ tuple \"alertmanager\" \"internal\" \"mesh\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n          ports:\n            - name: alerts-api\n              containerPort: {{ tuple \"alertmanager\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            - name: peer-mesh\n              containerPort: {{ tuple \"alertmanager\" \"internal\" \"mesh\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            httpGet:\n              path: /#/status\n              port: {{ tuple \"alertmanager\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etc-alertmanager\n              mountPath: /etc/config\n            {{- if .Values.conf.alert_templates }}\n            - name: alertmanager-etc\n              mountPath: /etc/alertmanager/template/alert-templates.tmpl\n              subPath: alert-templates.tmpl\n              readOnly: true\n            {{- end }}\n            - name: alertmanager-etc\n              mountPath: /etc/alertmanager/config.yml\n              subPath: config.yml\n              readOnly: true\n            - name: alertmanager-bin\n              mountPath: /tmp/alertmanager.sh\n              subPath: alertmanager.sh\n              readOnly: true\n            - name: alertmanager-data\n              mountPath: /var/lib/alertmanager/data\n{{ if $mounts_alertmanager.volumeMounts }}{{ toYaml $mounts_alertmanager.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: etc-alertmanager\n          emptyDir: {}\n        - name: alertmanager-etc\n          secret:\n            secretName: {{ printf \"%s-%s\" $envAll.Release.Name \"alertmanager-etc\" | quote }}\n            defaultMode: 0444\n        - name: alertmanager-bin\n         
 configMap:\n            name: {{ printf \"%s-%s\" $envAll.Release.Name \"alertmanager-bin\" | quote }}\n            defaultMode: 0555\n{{ if $mounts_alertmanager.volumes }}{{ toYaml $mounts_alertmanager.volumes | indent 8 }}{{ end }}\n{{- if not .Values.storage.alertmanager.enabled }}\n{{- if .Values.storage.alertmanager.use_local_path.enabled }}\n        - name: alertmanager-data\n          hostPath:\n            path: {{ .Values.storage.alertmanager.use_local_path.host_path }}\n            type: DirectoryOrCreate\n{{- else }}\n        - name: alertmanager-data\n          emptyDir: {}\n{{- end }}\n{{- else }}\n  volumeClaimTemplates:\n    - metadata:\n        name: alertmanager-data\n      spec:\n        accessModes: {{ .Values.storage.alertmanager.pvc.access_mode }}\n        resources:\n          requests:\n            storage: {{ .Values.storage.alertmanager.requests.storage  }}\n        storageClassName: {{ .Values.storage.alertmanager.storage_class }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-alertmanager/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for alertmanager.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nimages:\n  tags:\n    apache_proxy: docker.io/library/httpd:2.4\n    prometheus-alertmanager: docker.io/prom/alertmanager:v0.20.0\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  alertmanager:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\npod:\n  security_context:\n    server:\n      pod:\n        runAsUser: 65534\n      container:\n        prometheus_alertmanager_perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        apache_proxy:\n          runAsUser: 0\n          readOnlyRootFilesystem: false\n        prometheus_alertmanager:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  mounts:\n    alertmanager:\n      alertmanager:\n      init_container: 
null\n  replicas:\n    alertmanager: 1\n  lifecycle:\n    upgrades:\n      deployment:\n        pod_replacement_strategy: RollingUpdate\n      statefulsets:\n        pod_replacement_strategy: RollingUpdate\n    termination_grace_period:\n      alertmanager:\n        timeout: 30\n  resources:\n    enabled: false\n    apache_proxy:\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n    alertmanager:\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n      requests:\n        memory: \"128Mi\"\n        cpu: \"500m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      prometheus-alertmanager:\n        username: prometheus-alertmanager\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  alertmanager:\n    name: prometheus-alertmanager\n    namespace: null\n    auth:\n      admin:\n        username: admin\n        password: changeme\n    hosts:\n      default: alerts-engine\n      public: prometheus-alertmanager\n      discovery: prometheus-alertmanager-discovery\n    host_fqdn_override:\n      default: null\n      # NOTE(srwilkers): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n    
  #     crt: null\n      #     key: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 9093\n        public: 80\n      mesh:\n        default: 9094\n      http:\n        default: 80\n  ldap:\n    hosts:\n      default: ldap\n    auth:\n      admin:\n        bind: \"cn=admin,dc=cluster,dc=local\"\n        password: password\n    host_fqdn_override:\n      default: null\n    path:\n      default: \"/ou=People,dc=cluster,dc=local\"\n    scheme:\n      default: ldap\n    port:\n      ldap:\n        default: 389\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - alertmanager-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    alertmanager:\n      services: null\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nnetwork:\n  alertmanager:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    node_port:\n      enabled: false\n      port: 30903\n\nsecrets:\n  oci_image_registry:\n    prometheus-alertmanager: prometheus-alertmanager-oci-image-registry-key\n  tls:\n    alertmanager:\n      alertmanager:\n        public: alerts-tls-public\n\nstorage:\n  alertmanager:\n    enabled: true\n    pvc:\n      access_mode: [\"ReadWriteOnce\"]\n    requests:\n      storage: 5Gi\n    storage_class: general\n    use_local_path:\n      enabled: false\n      host_path: /var/lib/prometheus-alertmanager-data\n\nmanifests:\n  clusterrolebinding: true\n  configmap_bin: true\n  configmap_etc: true\n  ingress: true\n  job_image_repo_sync: true\n  network_policy: false\n  secret_admin_user: true\n  secret_ingress_tls: true\n  secret_registry: true\n  service: true\n  
service_discovery: true\n  service_ingress: true\n  statefulset: true\n\nnetwork_policy:\n  alertmanager:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nmonitoring:\n  prometheus:\n    enabled: true\n    prometheus:\n      scrape: true\n\nconf:\n  httpd: |\n    ServerRoot \"/usr/local/apache2\"\n\n    Listen 80\n\n    LoadModule mpm_event_module modules/mod_mpm_event.so\n    LoadModule authn_file_module modules/mod_authn_file.so\n    LoadModule authn_core_module modules/mod_authn_core.so\n    LoadModule authz_host_module modules/mod_authz_host.so\n    LoadModule authz_groupfile_module modules/mod_authz_groupfile.so\n    LoadModule authz_user_module modules/mod_authz_user.so\n    LoadModule authz_core_module modules/mod_authz_core.so\n    LoadModule access_compat_module modules/mod_access_compat.so\n    LoadModule auth_basic_module modules/mod_auth_basic.so\n    LoadModule ldap_module modules/mod_ldap.so\n    LoadModule authnz_ldap_module modules/mod_authnz_ldap.so\n    LoadModule reqtimeout_module modules/mod_reqtimeout.so\n    LoadModule filter_module modules/mod_filter.so\n    LoadModule proxy_html_module modules/mod_proxy_html.so\n    LoadModule log_config_module modules/mod_log_config.so\n    LoadModule env_module modules/mod_env.so\n    LoadModule headers_module modules/mod_headers.so\n    LoadModule setenvif_module modules/mod_setenvif.so\n    LoadModule version_module modules/mod_version.so\n    LoadModule proxy_module modules/mod_proxy.so\n    LoadModule proxy_connect_module modules/mod_proxy_connect.so\n    LoadModule proxy_http_module modules/mod_proxy_http.so\n    LoadModule proxy_balancer_module modules/mod_proxy_balancer.so\n    LoadModule remoteip_module modules/mod_remoteip.so\n    LoadModule slotmem_shm_module modules/mod_slotmem_shm.so\n    LoadModule slotmem_plain_module modules/mod_slotmem_plain.so\n    LoadModule unixd_module modules/mod_unixd.so\n    LoadModule status_module modules/mod_status.so\n    LoadModule autoindex_module 
modules/mod_autoindex.so\n\n    <IfModule unixd_module>\n    User daemon\n    Group daemon\n    </IfModule>\n\n    <Directory />\n        AllowOverride none\n        Require all denied\n    </Directory>\n\n    <Files \".ht*\">\n        Require all denied\n    </Files>\n\n    ErrorLog /dev/stderr\n\n    LogLevel warn\n\n    <IfModule log_config_module>\n        LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" combined\n        LogFormat \"%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" proxy\n        LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b\" common\n\n        <IfModule logio_module>\n          LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\" %I %O\" combinedio\n        </IfModule>\n\n        SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n        CustomLog /dev/stdout common\n        CustomLog /dev/stdout combined\n        CustomLog /dev/stdout proxy env=forwarded\n    </IfModule>\n\n    <Directory \"/usr/local/apache2/cgi-bin\">\n        AllowOverride None\n        Options None\n        Require all granted\n    </Directory>\n\n    <IfModule headers_module>\n        RequestHeader unset Proxy early\n    </IfModule>\n\n    <IfModule proxy_html_module>\n    Include conf/extra/proxy-html.conf\n    </IfModule>\n\n    <VirtualHost *:80>\n      RemoteIPHeader X-Original-Forwarded-For\n      <Location />\n          ProxyPass http://localhost:{{ tuple \"alertmanager\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/\n          ProxyPassReverse http://localhost:{{ tuple \"alertmanager\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/\n      </Location>\n      <Proxy *>\n          AuthName \"Alertmanager\"\n          AuthType Basic\n          AuthBasicProvider file ldap\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}\n          AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}\n          AuthLDAPURL {{ tuple \"ldap\" \"default\" \"ldap\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | quote }}\n          Require valid-user\n      </Proxy>\n    </VirtualHost>\n  command_flags:\n    alertmanager:\n      storage.path: /var/lib/alertmanager/data\n      cluster.listen_address: \"0.0.0.0:9094\"\n  alertmanager: |\n    global:\n      # The smarthost and SMTP sender used for mail notifications.\n      smtp_smarthost: 'localhost:25'\n      smtp_from: 'alertmanager@example.org'\n      smtp_auth_username: 'alertmanager'\n      smtp_auth_password: 'password'\n      # The auth token for Hipchat.\n      hipchat_auth_token: '1234556789'\n      # Alternative host for Hipchat.\n      hipchat_api_url: 'https://hipchat.foobar.org/'\n    # The directory from which notification templates are read.\n    templates:\n      - '/etc/alertmanager/template/*.tmpl'\n    # The root route on which each incoming alert enters.\n    route:\n      # The labels by which incoming alerts are grouped together. 
For example,\n      # multiple alerts coming in for cluster=A and alertname=LatencyHigh would\n      # be batched into a single group.\n      group_by:\n        - alertname\n        - cluster\n        - service\n      # When a new group of alerts is created by an incoming alert, wait at\n      # least 'group_wait' to send the initial notification.\n      # This way ensures that you get multiple alerts for the same group that start\n      # firing shortly after another are batched together on the first\n      # notification.\n      group_wait: 30s\n      # When the first notification was sent, wait 'group_interval' to send a batch\n      # of new alerts that started firing for that group.\n      group_interval: 5m\n      # If an alert has successfully been sent, wait 'repeat_interval' to\n      # resend them.\n      repeat_interval: 3h\n      # A default receiver\n      # receiver: team-X-mails\n      receiver: 'team-X-mails'\n      # All the above attributes are inherited by all child routes and can\n      # overwritten on each.\n      # The child route trees.\n      routes:\n        # This routes performs a regular expression match on alert\n        # labels to catch alerts that are related to a list of\n        # services.\n        - receiver: 'team-X-mails'\n          continue: true\n        - match_re:\n            service: ^(foo1|foo2|baz)$\n          receiver: team-X-mails\n          # The service has a sub-route for critical alerts, any alerts\n          # that do not match, i.e. severity != critical, fall-back to the\n          # parent node and are sent to 'team-X-mails'\n          routes:\n            - match:\n                severity: critical\n              receiver: team-X-pager\n        - match:\n            service: files\n          receiver: team-Y-mails\n          routes:\n            - match:\n                severity: critical\n              receiver: team-Y-pager\n        # This route handles all alerts coming from a database service. 
If there's\n        # no team to handle it, it defaults to the DB team.\n        - match:\n            service: database\n          receiver: team-DB-pager\n          # Also group alerts by affected database.\n          group_by:\n            - alertname\n            - cluster\n            - database\n          routes:\n            - match:\n                owner: team-X\n              receiver: team-X-pager\n            - match:\n                owner: team-Y\n              receiver: team-Y-pager\n    # Inhibition rules allow to mute a set of alerts given that another alert is\n    # firing.\n    # We use this to mute any warning-level notifications if the same alert is\n    # already critical.\n    inhibit_rules:\n      - source_match:\n          severity: 'critical'\n        target_match:\n          severity: 'warning'\n        # Apply inhibition if the alertname is the same.\n        equal:\n          - alertname\n          - cluster\n          - service\n    receivers:\n      - name: 'team-X-mails'\n        email_configs:\n          - to: 'team-X+alerts@example.org'\n      - name: 'team-X-pager'\n        email_configs:\n          - to: 'team-X+alerts-critical@example.org'\n        pagerduty_configs:\n          - service_key: <team-X-key>\n      - name: 'team-Y-mails'\n        email_configs:\n          - to: 'team-Y+alerts@example.org'\n      - name: 'team-Y-pager'\n        pagerduty_configs:\n          - service_key: <team-Y-key>\n      - name: 'team-DB-pager'\n        pagerduty_configs:\n          - service_key: <team-DB-key>\n      - name: 'team-X-hipchat'\n        hipchat_configs:\n          - auth_token: <auth_token>\n            room_id: 85\n            message_format: html\n            notify: false\n  alert_templates: null\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: 
osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "prometheus-blackbox-exporter/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n---\napiVersion: v2\nappVersion: v0.16.0\ndescription: OpenStack-Helm blackbox exporter for Prometheus\nname: prometheus-blackbox-exporter\nversion: 2025.2.0\nhome: https://github.com/prometheus/blackbox_exporter\nsources:\n  - https://opendev.org/openstack/openstack-helm\n  - https://github.com/prometheus/blackbox_exporter\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "prometheus-blackbox-exporter/templates/deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- $envAll := . }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: prometheus-blackbox-exporter\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"prometheus-blackbox-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.prometheus_blackbox_exporter }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"prometheus-blackbox-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"prometheus-blackbox-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"prometheus-blackbox-exporter\" \"containerNames\" (list \"blackbox-exporter\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"prometheus_blackbox_exporter\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      nodeSelector:\n        {{ .Values.labels.blackbox_exporter.node_selector_key }}: {{ 
.Values.labels.blackbox_exporter.node_selector_value | quote }}\n      containers:\n      - name: blackbox-exporter\n{{ tuple $envAll \"blackbox_exporter\" | include \"helm-toolkit.snippets.image\" | indent 8 }}\n{{ tuple $envAll $envAll.Values.pod.resources.prometheus_blackbox_exporter | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"application\" \"prometheus_blackbox_exporter\" \"container\" \"blackbox_exporter\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 8 }}\n        args:\n          - \"--config.file=/config/blackbox.yaml\"\n        ports:\n          - name: metrics\n            containerPort: {{ tuple \"prometheus_blackbox_exporter\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n        livenessProbe:\n          httpGet:\n            path: /health\n            port: {{ tuple \"prometheus_blackbox_exporter\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          initialDelaySeconds: 30\n          periodSeconds: 30\n        readinessProbe:\n          httpGet:\n            path: /health\n            port: {{ tuple \"prometheus_blackbox_exporter\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          initialDelaySeconds: 20\n          periodSeconds: 30\n        volumeMounts:\n        - mountPath: /config/blackbox.yaml\n          name: config\n          subPath: blackbox.yaml\n      volumes:\n        - name: config\n          secret:\n            secretName: prometheus-blackbox-exporter-etc\n"
  },
  {
    "path": "prometheus-blackbox-exporter/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "prometheus-blackbox-exporter/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-blackbox-exporter/templates/secret.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- $envAll := . }}\n\napiVersion: v1\nkind: Secret\nmetadata:\n  name: prometheus-blackbox-exporter-etc\n  labels:\n{{ tuple $envAll \"prometheus-blackbox-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\ndata:\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" .Values.config.blackbox \"key\" \"blackbox.yaml\" \"format\" \"Secret\") | indent 2 }}\n"
  },
  {
    "path": "prometheus-blackbox-exporter/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- $envAll := . }}\n\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"prometheus_blackbox_exporter\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: metrics\n      port: {{ tuple \"prometheus_blackbox_exporter\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"prometheus-blackbox-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n"
  },
  {
    "path": "prometheus-blackbox-exporter/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for prometheus-blackbox-exporter.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n---\n\nimages:\n  tags:\n    blackbox_exporter: docker.io/prom/blackbox-exporter:v0.16.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\nlabels:\n  blackbox_exporter:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nservice:\n  annotations: {}\n  port: 9115\n\nsecrets:\n  oci_image_registry:\n    prometheus-blackbox-exporter: prometheus-blackbox-exporter-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      prometheus-blackbox-exporter:\n        username: prometheus-blackbox-exporter\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  prometheus_blackbox_exporter:\n    namespace: null\n    hosts:\n      default: prometheus-blackbox-exporter\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      metrics:\n        default: 9115\n\npod:\n  security_context:\n    prometheus_blackbox_exporter:\n      pod:\n        runAsUser: 65534\n      container:\n        blackbox_exporter:\n         
 allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  replicas:\n    prometheus_blackbox_exporter: 1\n  annotations:\n    prometheus.io/scrape: 'true'\n    prometheus.io/port: \"9115\"\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    termination_grace_period:\n      prometheus_blackbox_exporter:\n        timeout: 30\n  resources:\n    enabled: true\n    prometheus_blackbox_exporter:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - prometheus-blackbox-exporter-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    prometheus_blackbox_exporter:\n      jobs:\n        - prometheus-openstack-exporter-ks-user\n      services:\n        - endpoint: internal\n          service: identity\n\nconfig:\n  blackbox:\n    modules:\n      http_2xx:\n        prober: http\n        timeout: 10s\n        http:\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]\n          no_follow_redirects: false\n          preferred_ip_protocol: \"ip4\"\n\nmanifests:\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #   
    objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "prometheus-kube-state-metrics/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.3.1\ndescription: OpenStack-Helm Kube-State-Metrics for Prometheus\nname: prometheus-kube-state-metrics\nversion: 2025.2.0\nhome: https://github.com/kubernetes/kube-state-metrics\nsources:\n  - https://github.com/kubernetes/kube-state-metrics\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "prometheus-kube-state-metrics/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: kube-state-metrics-bin\ndata:\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-kube-state-metrics/templates/deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"kubeMetricsReadinessProbe\" }}\nhttpGet:\n  path: /metrics\n  port: {{ tuple \"kube_state_metrics\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- if .Values.manifests.deployment }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"kube-state-metrics\" }}\n{{ tuple $envAll \"kube_state_metrics\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ $serviceAccountName }}\nrules:\n  - apiGroups:\n      - \"*\"\n    resources:\n      - \"*\"\n    verbs:\n      - list\n      - watch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ $serviceAccountName }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ $envAll.Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: {{ $serviceAccountName }}\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: kube-state-metrics\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"kube-state-metrics\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ 
.Values.pod.replicas.kube_state_metrics }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"kube-state-metrics\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"kube-state-metrics\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"kube-state-metrics\" \"containerNames\" (list \"kube-state-metrics\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"kube-state-metrics\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.kube_state_metrics.node_selector_key }}: {{ .Values.labels.kube_state_metrics.node_selector_value | quote }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.kube_state_metrics.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"kube_state_metrics\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: kube-state-metrics\n{{ tuple $envAll \"kube_state_metrics\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.kube_state_metrics | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ 
dict \"envAll\" $envAll \"application\" \"exporter\" \"container\" \"kube_state_metrics\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          ports:\n            - name: metrics\n              containerPort: {{ tuple \"kube_state_metrics\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" . \"component\" \"server\" \"container\" \"kube_metrics\" \"type\" \"readiness\" \"probeTemplate\" (include \"kubeMetricsReadinessProbe\" . | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n{{- end }}\n"
  },
  {
    "path": "prometheus-kube-state-metrics/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "prometheus-kube-state-metrics/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"kube-state-metrics\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-kube-state-metrics/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License. */}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"kube-state-metrics\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "prometheus-kube-state-metrics/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-kube-state-metrics/templates/service-controller-manager.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_controller_manager }}\n{{- $envAll := . }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.kube_controller_manager }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: kube-controller-manager-discovery\n  labels:\n{{ tuple $envAll \"controller-manager\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{- if .Values.monitoring.prometheus.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n{{- end }}\nspec:\n  selector:\n    component: kube-controller-manager\n  type: ClusterIP\n  clusterIP: None\n  ports:\n  - name: http-metrics\n    port: {{ tuple \"kube_controller_manager\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    targetPort: {{ tuple \"kube_controller_manager\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    protocol: TCP\n{{- end }}\n"
  },
  {
    "path": "prometheus-kube-state-metrics/templates/service-kube-state-metrics.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_kube_state_metrics }}\n{{- $envAll := . }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.kube_state_metrics }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"kube_state_metrics\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  labels:\n{{ tuple $envAll \"kube-state-metrics\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{- if .Values.monitoring.prometheus.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n{{- end }}\nspec:\n  ports:\n  - name: http\n    port: {{ tuple \"kube_state_metrics\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    targetPort: {{ tuple \"kube_state_metrics\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"kube-state-metrics\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-kube-state-metrics/templates/service-scheduler.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_scheduler }}\n{{- $envAll := . }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.kube_scheduler }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: kube-scheduler-discovery\n  labels:\n{{ tuple $envAll \"kube-scheduler\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{- if .Values.monitoring.prometheus.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n{{- end }}\nspec:\n  selector:\n    component: kube-scheduler\n  type: ClusterIP\n  clusterIP: None\n  ports:\n  - name: http-metrics\n    port: {{ tuple \"kube_scheduler\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    targetPort: {{ tuple \"kube_scheduler\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    protocol: TCP\n{{- end }}\n"
  },
  {
    "path": "prometheus-kube-state-metrics/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for kube-state-metrics.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nimages:\n  tags:\n    kube_state_metrics: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.16.0\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  kube_state_metrics:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\npod:\n  probes:\n    server:\n      kube_metrics:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n            periodSeconds: 60\n            timeoutSeconds: 10\n  security_context:\n    exporter:\n      pod:\n        runAsUser: 65534\n      container:\n        kube_state_metrics:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  mounts:\n    kube_state_metrics:\n      kube_state_metrics:\n      init_container: null\n  
replicas:\n    kube_state_metrics: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    termination_grace_period:\n      kube_state_metrics:\n        timeout: 30\n  resources:\n    enabled: false\n    kube_state_metrics:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - kube-metrics-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    kube_state_metrics:\n      services: null\n\nsecrets:\n  oci_image_registry:\n    prometheus-kube-state-metrics: prometheus-kube-state-metrics-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      prometheus-kube-state-metrics:\n        username: prometheus-kube-state-metrics\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  kube_state_metrics:\n    namespace: null\n    hosts:\n      default: kube-state-metrics\n    
host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      http:\n        default: 8080\n  kube_scheduler:\n    scheme:\n      default: 'http'\n    path:\n      default: /metrics\n    port:\n      metrics:\n        default: 10251\n  kube_controller_manager:\n    scheme:\n      default: 'http'\n    path:\n      default: /metrics\n    port:\n      metrics:\n        default: 10252\n\nnetwork_policy:\n  kube-state-metrics:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nmonitoring:\n  prometheus:\n    enabled: true\n    kube_state_metrics:\n      scrape: true\n    kube_scheduler:\n      scrape: true\n    kube_controller_manager:\n      scrape: true\n\nmanifests:\n  configmap_bin: true\n  deployment: true\n  job_image_repo_sync: true\n  network_policy: false\n  secret_registry: true\n  service_kube_state_metrics: true\n  service_controller_manager: true\n  service_scheduler: true\n  serviceaccount: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "prometheus-mysql-exporter/.helmignore",
    "content": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation (prefixed with !). Only one pattern per line.\n.DS_Store\n# Common VCS dirs\n.git/\n.gitignore\n.bzr/\n.bzrignore\n.hg/\n.hgignore\n.svn/\n# Common backup files\n*.swp\n*.bak\n*.tmp\n*~\n# Various IDEs\n.project\n.idea/\n*.tmproj\n"
  },
  {
    "path": "prometheus-mysql-exporter/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v0.12.1\ndescription: OpenStack-Helm Prometheus mysql-exporter\nname: prometheus-mysql-exporter\nversion: 2025.2.0\nhome: https://mariadb.com/kb/en/\nicon: http://badges.mariadb.org/mariadb-badge-180x60.png\nsources:\n  - https://github.com/MariaDB/server\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "prometheus-mysql-exporter/README.rst",
    "content": "openstack-helm/prometheus-mysql-exporter\n=========================================\n\nThis chart deploys the Prometheus mysqld exporter, which connects to a\nMariaDB/MySQL database and exposes its metrics for collection by\nPrometheus.\n\nYou must ensure that your control nodes that should receive exporter\ninstances are labeled with ``openstack-control-plane=enabled``, or\nwhatever you have configured in values.yaml for the label\nconfiguration:\n\n::\n\n    kubectl label nodes openstack-control-plane=enabled --all\n"
  },
  {
    "path": "prometheus-mysql-exporter/templates/bin/_create-mysql-user.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -e\n\n  # SLAVE MONITOR\n  # Grants ability to SHOW SLAVE STATUS, SHOW REPLICA STATUS,\n  # SHOW ALL SLAVES STATUS, SHOW ALL REPLICAS STATUS, SHOW RELAYLOG EVENTS.\n  # New privilege added in MariaDB Enterprise Server 10.5.8-5. Alias for REPLICA MONITOR.\n  #\n  # REPLICATION CLIENT\n  # Grants ability to SHOW MASTER STATUS, SHOW SLAVE STATUS, SHOW BINARY LOGS. In ES10.5,\n  # is an alias for BINLOG MONITOR and the capabilities have changed. BINLOG MONITOR grants\n  # ability to SHOW MASTER STATUS, SHOW BINARY LOGS, SHOW BINLOG EVENTS, and SHOW BINLOG STATUS.\n\n  mariadb_version=$(mysql --defaults-file=/etc/mysql/admin_user.cnf -e \"status\" | grep -E '^Server\\s+version:')\n  echo \"Current database ${mariadb_version}\"\n\n  if [[ ! -z ${mariadb_version} && -z $(grep -E '10.2|10.3|10.4' <<< ${mariadb_version}) ]]; then\n    # In case MariaDB version is 10.2.x-10.4.x - we use old privileges definitions\n    if ! mysql --defaults-file=/etc/mysql/admin_user.cnf -e \\\n      \"CREATE OR REPLACE USER '${EXPORTER_USER}'@'%' IDENTIFIED BY '${EXPORTER_PASSWORD}'; \\\n      GRANT PROCESS, BINLOG MONITOR, SLAVE MONITOR, SELECT ON *.* TO '${EXPORTER_USER}'@'%' ${MARIADB_X509}; \\\n      FLUSH PRIVILEGES;\" ; then\n      echo \"ERROR: Could not create user: ${EXPORTER_USER}\"\n      exit 1\n    fi\n  else\n    # here we use new MariaDB privileges definitions defines since version 10.5\n    if ! 
mysql --defaults-file=/etc/mysql/admin_user.cnf -e \\\n      \"CREATE OR REPLACE USER '${EXPORTER_USER}'@'%' IDENTIFIED BY '${EXPORTER_PASSWORD}'; \\\n      GRANT PROCESS, REPLICATION CLIENT, SELECT ON *.* TO '${EXPORTER_USER}'@'%' ${MARIADB_X509}; \\\n      FLUSH PRIVILEGES;\" ; then\n      echo \"ERROR: Could not create user: ${EXPORTER_USER}\"\n      exit 1\n    fi\n  fi\n"
  },
  {
    "path": "prometheus-mysql-exporter/templates/bin/_mysqld-exporter.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\ncompareVersions() {\necho $1 $2 | \\\nawk '{ split($1, a, \".\");\n       split($2, b, \".\");\n       res = -1;\n       for (i = 1; i <= 3; i++){\n           if (a[i] < b[i]) {\n               res =-1;\n               break;\n           } else if (a[i] > b[i]) {\n               res = 1;\n               break;\n           } else if (a[i] == b[i]) {\n               if (i == 3) {\n               res = 0;\n               break;\n               } else {\n               continue;\n               }\n           }\n       }\n       print res;\n     }'\n}\n\nMYSQL_EXPORTER_VER=`/bin/mysqld_exporter --version 2>&1 | grep \"mysqld_exporter\" | awk '{print $3}'`\n\n#in versions greater than 0.10.0 different configuration flags are used:\n#https://github.com/prometheus/mysqld_exporter/commit/66c41ac7eb90a74518a6ecf6c6bb06464eb68db8\ncompverResult=`compareVersions \"${MYSQL_EXPORTER_VER}\" \"0.10.0\"`\nCONFIG_FLAG_PREFIX='-'\nif [ ${compverResult} -gt 0 ]; then\n    CONFIG_FLAG_PREFIX='--'\nfi\n\nexec /bin/mysqld_exporter \\\n  ${CONFIG_FLAG_PREFIX}config.my-cnf=/etc/mysql/mysql_user.cnf \\\n  ${CONFIG_FLAG_PREFIX}web.listen-address=\"${POD_IP}:${LISTEN_PORT}\" \\\n  ${CONFIG_FLAG_PREFIX}web.telemetry-path=\"$TELEMETRY_PATH\"\n"
  },
  {
    "path": "prometheus-mysql-exporter/templates/exporter-configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.configmap_bin .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: mysql-exporter-bin\ndata:\n  create-mysql-user.sh: |\n{{ tuple \"bin/_create-mysql-user.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  mysqld-exporter.sh: |\n{{ tuple \"bin/_mysqld-exporter.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-mysql-exporter/templates/exporter-deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.deployment_exporter .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"prometheus-mysql-exporter\" }}\n{{ tuple $envAll \"prometheus_mysql_exporter\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: prometheus-mysql-exporter\n  labels:\n{{ tuple $envAll \"prometheus-mysql-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.prometheus_mysql_exporter }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"prometheus-mysql-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"prometheus-mysql-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      namespace: {{ .Values.endpoints.prometheus_mysql_exporter.namespace }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"prometheus-mysql-exporter\" \"containerNames\" (list \"init\" \"mysql-exporter\") | include 
\"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      shareProcessNamespace: true\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"prometheus_mysql_exporter\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      nodeSelector:\n        {{ .Values.labels.prometheus_mysql_exporter.node_selector_key }}: {{ .Values.labels.prometheus_mysql_exporter.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.prometheus_mysql_exporter.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"prometheus_mysql_exporter\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: mysql-exporter\n{{ tuple $envAll \"prometheus_mysql_exporter\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"prometheus_mysql_exporter\" \"container\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.prometheus_mysql_exporter | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/mysqld-exporter.sh\n          ports:\n            - name: metrics\n              containerPort: {{ tuple \"prometheus_mysql_exporter\" \"internal\" \"metrics\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          env:\n            - name: EXPORTER_USER\n              valueFrom:\n                secretKeyRef:\n                  name: mysql-exporter-secrets\n                  key: EXPORTER_USER\n            - name: EXPORTER_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: mysql-exporter-secrets\n                  key: EXPORTER_PASSWORD\n            - name: POD_IP\n              valueFrom:\n                fieldRef:\n                  fieldPath: status.podIP\n            - name: LISTEN_PORT\n              value: {{ tuple \"prometheus_mysql_exporter\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: TELEMETRY_PATH\n              value: {{ tuple \"prometheus_mysql_exporter\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" | quote }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: mysql-exporter-secrets\n              mountPath: /etc/mysql/mysql_user.cnf\n              subPath: mysql_user.cnf\n              readOnly: true\n            - name: mysql-exporter-bin\n              mountPath: /tmp/mysqld-exporter.sh\n              subPath: mysqld-exporter.sh\n              readOnly: true\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: mysql-exporter-secrets\n          secret:\n            secretName: mysql-exporter-secrets\n            defaultMode: 0444\n        - name: mysql-exporter-bin\n          configMap:\n            name: mysql-exporter-bin\n            defaultMode: 0555\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" 
$envAll.Values.secrets.tls.oslo_db.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-mysql-exporter/templates/exporter-job-create-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.job_user_create .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"exporter-create-sql-user\" }}\n{{ tuple $envAll \"prometheus_create_mysql_user\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: exporter-create-sql-user\n  labels:\n{{ tuple $envAll \"prometheus-mysql-exporter\" \"create-sql-user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": \"post-install,post-upgrade\"\n    \"helm.sh/hook-weight\": \"5\"\n    \"helm.sh/hook-delete-policy\": \"before-hook-creation\"\nspec:\n  backoffLimit: {{ .Values.jobs.exporter_create_sql_user.backoffLimit }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"prometheus-mysql-exporter\" \"create-sql-user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"create-sql-user\" \"containerNames\" (list \"init\" \"exporter-create-sql-user\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n      shareProcessNamespace: true\n      serviceAccountName: {{ $serviceAccountName 
}}\n{{ dict \"envAll\" $envAll \"application\" \"prometheus_create_mysql_user\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      activeDeadlineSeconds: {{ .Values.jobs.exporter_create_sql_user.activeDeadlineSeconds }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.prometheus_mysql_exporter.node_selector_key }}: {{ .Values.labels.prometheus_mysql_exporter.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"prometheus_create_mysql_user\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: exporter-create-sql-user\n{{ tuple $envAll \"prometheus_create_mysql_user\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"prometheus_create_mysql_user\" \"container\" \"main\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.prometheus_create_mysql_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/create-mysql-user.sh\n          env:\n            - name: EXPORTER_USER\n              valueFrom:\n                secretKeyRef:\n                  name: mysql-exporter-secrets\n                  key: EXPORTER_USER\n            - name: EXPORTER_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: mysql-exporter-secrets\n                  key: EXPORTER_PASSWORD\n{{- if $envAll.Values.manifests.certificates }}\n            - name: MARIADB_X509\n              value: \"REQUIRE X509\"\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: mysql-exporter-bin\n              mountPath: /tmp/create-mysql-user.sh\n              subPath: create-mysql-user.sh\n              readOnly: true\n            - name: mariadb-secrets\n     
         mountPath: /etc/mysql/admin_user.cnf\n              subPath: admin_user.cnf\n              readOnly: true\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: mysql-exporter-bin\n          configMap:\n            name: mysql-exporter-bin\n            defaultMode: 0555\n        - name: mariadb-secrets\n          secret:\n            secretName: mariadb-secrets\n            defaultMode: 0444\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_db.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-mysql-exporter/templates/exporter-network-policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.network_policy_exporter .Values.monitoring.prometheus.enabled -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"prometheus-mysql-exporter\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "prometheus-mysql-exporter/templates/exporter-secrets-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.secret_etc .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n\n{{- $exporter_user := .Values.endpoints.oslo_db.auth.exporter.username }}\n{{- $exporter_password := .Values.endpoints.oslo_db.auth.exporter.password }}\n{{- $db_host := tuple \"oslo_db\" \"direct\" \"mysql\" $envAll | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }}\n{{- $data_source_name := printf \"%s:%s@(%s)/\" $exporter_user $exporter_password $db_host }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: mysql-exporter-secrets\ntype: Opaque\ndata:\n  DATA_SOURCE_NAME: {{ $data_source_name | b64enc }}\n  EXPORTER_USER: {{ .Values.endpoints.oslo_db.auth.exporter.username | b64enc }}\n  EXPORTER_PASSWORD: {{ .Values.endpoints.oslo_db.auth.exporter.password | b64enc }}\n  mysql_user.cnf: {{ tuple \"secrets/_exporter_user.cnf.tpl\" . | include \"helm-toolkit.utils.template\" | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-mysql-exporter/templates/exporter-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.monitoring.prometheus.service_exporter .Values.monitoring.prometheus.enabled }}\n{{- $envAll := . }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.mysqld_exporter }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"prometheus_mysql_exporter\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  labels:\n{{ tuple $envAll \"prometheus-mysql-exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{- if .Values.monitoring.prometheus.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n{{- end }}\nspec:\n  ports:\n  - name: metrics\n    port: {{ tuple \"prometheus_mysql_exporter\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"prometheus-mysql-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-mysql-exporter/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "prometheus-mysql-exporter/templates/secrets/_exporter_user.cnf.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n[client]\nuser = {{ .Values.endpoints.oslo_db.auth.exporter.username }}\npassword = {{ .Values.endpoints.oslo_db.auth.exporter.password }}\nhost = {{ tuple \"oslo_db\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\nport = {{ tuple \"oslo_db\" \"direct\" \"mysql\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- if .Values.manifests.certificates }}\nssl-ca = /etc/mysql/certs/ca.crt\nssl-key = /etc/mysql/certs/tls.key\nssl-cert = /etc/mysql/certs/tls.crt\n{{- end }}\n"
  },
  {
    "path": "prometheus-mysql-exporter/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for mariadb.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nimages:\n  tags:\n    prometheus_create_mysql_user: docker.io/library/mariadb:10.5.9-focal\n    prometheus_mysql_exporter: docker.io/prom/mysqld-exporter:v0.12.1\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  prometheus_mysql_exporter:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\npod:\n  security_context:\n    prometheus_mysql_exporter:\n      pod:\n        runAsUser: 99\n      container:\n        exporter:\n          runAsUser: 99\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    prometheus_create_mysql_user:\n      pod:\n        runAsUser: 0\n      container:\n        main:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  affinity:\n    anti:\n      type:\n        default: 
preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  replicas:\n    prometheus_mysql_exporter: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    termination_grace_period:\n      prometheus_mysql_exporter:\n        timeout: 30\n  resources:\n    enabled: false\n    prometheus_mysql_exporter:\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n      requests:\n        memory: \"128Mi\"\n        cpu: \"500m\"\n    jobs:\n      prometheus_create_mysql_user:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - mysql-exporter-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    prometheus_create_mysql_user:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    prometheus_mysql_exporter:\n      jobs:\n        - exporter-create-sql-user\n      services:\n        - endpoint: internal\n          service: oslo_db\n    prometheus_mysql_exporter_tests:\n      services:\n        - endpoint: internal\n          service: prometheus_mysql_exporter\n        - endpoint: internal\n          service: monitoring\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\njobs:\n  exporter_create_sql_user:\n    backoffLimit: 87600\n    activeDeadlineSeconds: 3600\n\nmonitoring:\n  prometheus:\n    enabled: false\n    
mysqld_exporter:\n      scrape: true\n\nsecrets:\n  identity:\n    admin: keystone-admin-user\n  oci_image_registry:\n    mariadb: mariadb-oci-image-registry-key\n  tls:\n    oslo_db:\n      server:\n        public: mariadb-tls-server\n        internal: mariadb-tls-direct\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      mariadb:\n        username: mariadb\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  monitoring:\n    name: prometheus\n    namespace: null\n    hosts:\n      default: prom-metrics\n      public: prometheus\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 9090\n        public: 80\n  prometheus_mysql_exporter:\n    namespace: null\n    hosts:\n      default: mysql-exporter\n    host_fqdn_override:\n      default: null\n    path:\n      default: /metrics\n    scheme:\n      default: 'http'\n    port:\n      metrics:\n        default: 9104\n  oslo_db:\n    namespace: null\n    auth:\n      admin:\n        username: root\n        password: password\n      sst:\n        username: sst\n        password: password\n      audit:\n        username: audit\n        password: password\n      exporter:\n        username: exporter\n        password: password\n    hosts:\n      default: mariadb-server-primary\n      direct: 
mariadb-server-internal\n      discovery: mariadb-discovery\n      server: mariadb-server\n    host_fqdn_override:\n      default: null\n    path: null\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n      wsrep:\n        default: 4567\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns_tcp:\n        default: 53\n      dns:\n        default: 53\n        protocol: UDP\n  identity:\n    name: backup-storage-auth\n    namespace: openstack\n    auth:\n      admin:\n        # Auth URL of null indicates local authentication\n        # HTK will form the URL unless specified here\n        auth_url: null\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      mariadb-server:\n        # Auth URL of null indicates local authentication\n        # HTK will form the URL unless specified here\n        auth_url: null\n        role: admin\n        region_name: RegionOne\n        username: mariadb-backup-user\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 80\n        internal: 5000\n\nnetwork_policy:\n  prometheus-mysql-exporter:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nmanifests:\n  certificates: false\n  job_image_repo_sync: true\n  monitoring:\n    prometheus:\n      configmap_bin: false\n      deployment_exporter: false\n      job_user_create: false\n      secret_etc: false\n      service_exporter: false\n      network_policy_exporter: 
false\n  network_policy: false\n  secret_etc: true\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "prometheus-node-exporter/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v0.18.1\ndescription: OpenStack-Helm Node Exporter for Prometheus\nname: prometheus-node-exporter\nversion: 2025.2.0\nhome: https://github.com/prometheus/node_exporter\nsources:\n  - https://github.com/prometheus/node_exporter\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "prometheus-node-exporter/templates/bin/_node-exporter.sh.tpl",
    "content": "#!/bin/sh\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec /bin/node_exporter \\\n  {{- if .Values.conf.collectors.enable }}\n  {{ tuple \"--collector.\" .Values.conf.collectors.enable | include \"helm-toolkit.utils.joinListWithPrefix\" }} \\\n  {{- end }}\n  {{- if  .Values.conf.collectors.disable }}\n  {{ tuple \"--no-collector.\" .Values.conf.collectors.disable | include \"helm-toolkit.utils.joinListWithPrefix\" }} \\\n  {{- end }}\n  {{- if .Values.conf.collectors.textfile.directory }}\n  --collector.textfile.directory={{.Values.conf.collectors.textfile.directory }} \\\n  {{- end }}\n  {{- if .Values.conf.collectors.filesystem.ignored_mount_points }}\n  --collector.filesystem.ignored-mount-points={{ .Values.conf.collectors.filesystem.ignored_mount_points }} \\\n  {{- end }}\n  {{- if .Values.conf.collectors.filesystem.rootfs_mount_point }}\n  --path.rootfs={{ .Values.conf.collectors.filesystem.rootfs_mount_point }} \\\n  {{- end }}\n  --collector.ntp.server={{ .Values.conf.ntp_server_ip }}\n"
  },
  {
    "path": "prometheus-node-exporter/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: node-exporter-bin\ndata:\n  node-exporter.sh: |\n{{ tuple \"bin/_node-exporter.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-node-exporter/templates/daemonset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.daemonset }}\n{{- $envAll := . }}\n\n{{- $mounts_node_exporter := .Values.pod.mounts.node_exporter.node_exporter}}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"node-exporter\" }}\n{{ tuple $envAll \"node_exporter\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: run-node-exporter\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: cluster-admin\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: node-exporter\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"node_exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"node_exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"node_exporter\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"node_exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      
annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"node-exporter\" \"containerNames\" (list \"node-exporter\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if .Values.pod.tolerations.node_exporter.enabled }}\n{{ tuple $envAll \"node_exporter\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ else }}\n      nodeSelector:\n        {{ .Values.labels.node_exporter.node_selector_key }}: {{ .Values.labels.node_exporter.node_selector_value | quote }}\n{{ end }}\n      hostNetwork: true\n      hostPID: true\n      initContainers:\n{{ tuple $envAll \"node_exporter\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: node-exporter\n{{ tuple $envAll \"node_exporter\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.node_exporter | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"metrics\" \"container\" \"node_exporter\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/node-exporter.sh\n          ports:\n            - name: metrics\n              containerPort: {{ tuple \"node_metrics\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              hostPort: {{ tuple \"node_metrics\" \"internal\" \"metrics\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            httpGet:\n              port: {{ tuple \"node_metrics\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 20\n            periodSeconds: 10\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: proc\n              mountPath: /host/proc\n              readOnly: true\n            - name: sys\n              mountPath: /host/sys\n              readOnly: true\n{{ if .Values.conf.collectors.textfile.directory }}\n            - name: stats-out\n              mountPath: {{.Values.conf.collectors.textfile.directory }}\n              readOnly: true\n{{ end }}\n            - name: node-exporter-bin\n              mountPath: /tmp/node-exporter.sh\n              subPath: node-exporter.sh\n              readOnly: true\n{{ if $mounts_node_exporter.volumeMounts }}{{ toYaml $mounts_node_exporter.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: proc\n          hostPath:\n            path: /proc\n        - name: sys\n          hostPath:\n            path: /sys\n{{ if .Values.conf.collectors.textfile.directory }}\n        - name: stats-out\n          hostPath:\n            path: {{.Values.conf.collectors.textfile.directory }}\n{{ end }}\n        - name: node-exporter-bin\n          configMap:\n            name: node-exporter-bin\n            defaultMode: 0555\n{{ if $mounts_node_exporter.volumes }}{{ toYaml $mounts_node_exporter.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-node-exporter/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "prometheus-node-exporter/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"node-exporter\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-node-exporter/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-node-exporter/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.node_exporter }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"node_metrics\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  labels:\n{{ tuple $envAll \"node_exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{- if .Values.monitoring.prometheus.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n{{- end }}\nspec:\n  type: ClusterIP\n  clusterIP: None\n  ports:\n  - name: metrics\n    port: {{ tuple \"node_metrics\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    targetPort: {{ tuple \"node_metrics\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"node_exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-node-exporter/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for node-exporter.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nimages:\n  tags:\n    node_exporter: docker.io/prom/node-exporter:v0.18.1\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  node_exporter:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\npod:\n  security_context:\n    metrics:\n      pod:\n        runAsUser: 65534\n      container:\n        node_exporter:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n  mounts:\n    node_exporter:\n      node_exporter:\n      init_container: null\n  lifecycle:\n    upgrades:\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        node_exporter:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n    termination_grace_period:\n      node_exporter:\n        timeout: 30\n  resources:\n    
enabled: false\n    node_exporter:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  tolerations:\n    node_exporter:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n      - key: node-role.kubernetes.io/node\n        operator: Exists\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - node-exporter-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    node_exporter:\n      services: null\n\nmonitoring:\n  prometheus:\n    enabled: true\n    node_exporter:\n      scrape: true\n\nsecrets:\n  oci_image_registry:\n    prometheus-node-exporter: prometheus-node-exporter-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      prometheus-node-exporter:\n        username: prometheus-node-exporter\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  node_metrics:\n    namespace: null\n    hosts:\n      default: node-exporter\n   
 host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      metrics:\n        default: 9100\n\nmanifests:\n  configmap_bin: true\n  daemonset: true\n  job_image_repo_sync: true\n  secret_registry: true\n  service: true\n\nconf:\n  ntp_server_ip: 127.0.0.1\n  collectors:\n    enable:\n      - ntp\n      - meminfo_numa\n      - bonding\n      - mountstats\n    disable:\n    textfile:\n      directory: /var/log/node-exporter-vfstats\n    filesystem:\n      ignored_mount_points:\n      rootfs_mount_point:\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "prometheus-openstack-exporter/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack Metrics Exporter for Prometheus\nname: prometheus-openstack-exporter\nversion: 2025.2.0\nhome: https://opendev.org/openstack/openstack-helm\nsources:\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "prometheus-openstack-exporter/templates/bin/_prometheus-openstack-exporter.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec python3 /usr/local/bin/exporter/main.py\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "prometheus-openstack-exporter/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: prometheus-openstack-exporter-bin\ndata:\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  prometheus-openstack-exporter.sh: |\n{{ tuple \"bin/_prometheus-openstack-exporter.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-openstack-exporter/templates/deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment }}\n{{- $envAll := . }}\n{{- $ksUserSecret := .Values.secrets.identity.user }}\n\n{{- $serviceAccountName := \"prometheus-openstack-exporter\" }}\n{{ tuple $envAll \"prometheus_openstack_exporter\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: prometheus-openstack-exporter\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"prometheus-openstack-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.prometheus_openstack_exporter }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"prometheus-openstack-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"prometheus-openstack-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        secret-keystone-hash: {{ tuple \"secret-keystone.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        secret-registry-hash: {{ tuple \"secret-registry.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"prometheus-openstack-exporter\" \"containerNames\" (list \"openstack-metrics-exporter\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"prometheus-openstack-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.openstack_exporter.node_selector_key }}: {{ .Values.labels.openstack_exporter.node_selector_value | quote }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.prometheus_openstack_exporter.timeout | default \"30\" }}\n      initContainers:\n        {{ tuple $envAll \"prometheus_openstack_exporter\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 | trim }}\n        - name: clouds-yaml-gen\n          {{ tuple $envAll \"dep_check\" | include \"helm-toolkit.snippets.image\" | nindent 10 }}\n          {{ tuple $envAll $envAll.Values.pod.resources.clouds_yaml_gen | include \"helm-toolkit.snippets.kubernetes_resources\" | nindent 10 }}\n          {{ dict \"envAll\" $envAll \"application\" \"clouds_yaml_gen\" \"container\" \"clouds_yaml_gen\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | nindent 10 }}\n          command:\n            - /bin/sh\n            - -c\n            - |\n              cat <<EOF > /etc/openstack/clouds.yaml\n              clouds:\n                default:\n                  
auth:\n                    auth_url: \"$OS_AUTH_URL\"\n                    username: \"$OS_USERNAME\"\n                    password: \"$OS_PASSWORD\"\n                    project_name: \"$OS_PROJECT_NAME\"\n                    user_domain_name: \"$OS_USER_DOMAIN_NAME\"\n                    project_domain_name: \"$OS_PROJECT_DOMAIN_NAME\"\n                  region_name: \"$OS_REGION_NAME\"\n                  interface: \"$OS_INTERFACE\"\n                  identity_api_version: \"$OS_IDENTITY_API_VERSION\"\n                  {{- if .Values.manifests.certificates }}\n                  cacert: \"/etc/ssl/certs/openstack-helm.crt\"\n                  {{- end }}\n              EOF\n          env:\n            - name: OS_AUTH_URL\n              valueFrom:\n                secretKeyRef:\n                  key: OS_AUTH_URL\n                  name: {{ .Values.secrets.identity.user | quote }}\n            - name: OS_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  key: OS_USERNAME\n                  name: {{ .Values.secrets.identity.user | quote }}\n            - name: OS_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  key: OS_PASSWORD\n                  name: {{ .Values.secrets.identity.user | quote }}\n            - name: OS_PROJECT_NAME\n              valueFrom:\n                secretKeyRef:\n                  key: OS_PROJECT_NAME\n                  name: {{ .Values.secrets.identity.user | quote }}\n            - name: OS_USER_DOMAIN_NAME\n              valueFrom:\n                secretKeyRef:\n                  key: OS_USER_DOMAIN_NAME\n                  name: {{ .Values.secrets.identity.user | quote }}\n            - name: OS_PROJECT_DOMAIN_NAME\n              valueFrom:\n                secretKeyRef:\n                  key: OS_PROJECT_DOMAIN_NAME\n                  name: {{ .Values.secrets.identity.user | quote }}\n            - name: OS_REGION_NAME\n              valueFrom:\n                
secretKeyRef:\n                  key: OS_REGION_NAME\n                  name: {{ .Values.secrets.identity.user | quote }}\n            - name: OS_INTERFACE\n              valueFrom:\n                secretKeyRef:\n                  key: OS_INTERFACE\n                  name: {{ .Values.secrets.identity.user | quote }}\n            - name: OS_IDENTITY_API_VERSION\n              value: \"3\"\n          volumeMounts:\n            - name: clouds-yaml\n              mountPath: /etc/openstack\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.identity.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n          securityContext:\n            allowPrivilegeEscalation: false\n            readOnlyRootFilesystem: false\n            runAsUser: 65534\n      containers:\n        - name: openstack-metrics-exporter\n{{ tuple $envAll \"prometheus_openstack_exporter\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.prometheus_openstack_exporter | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"exporter\" \"container\" \"openstack_metrics_exporter\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /bin/openstack-exporter\n          args:\n            - --web.listen-address=:{{ tuple \"prometheus_openstack_exporter\" \"internal\" \"exporter\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            - --os-client-config=/etc/openstack/clouds.yaml\n            - default\n          ports:\n            - name: metrics\n              containerPort: {{ tuple \"prometheus_openstack_exporter\" \"internal\" \"exporter\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          livenessProbe:\n            httpGet:\n              path: /metrics\n              port: {{ tuple \"prometheus_openstack_exporter\" \"internal\" \"exporter\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 180\n            periodSeconds: 60\n            timeoutSeconds: 30\n          readinessProbe:\n            httpGet:\n              path: /metrics\n              port: {{ tuple \"prometheus_openstack_exporter\" \"internal\" \"exporter\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 30\n            periodSeconds: 30\n            timeoutSeconds: 30\n          env:\n            - name: LISTEN_PORT\n              value: {{ tuple \"prometheus_openstack_exporter\" \"internal\" \"exporter\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | quote }}\n            - name: OS_POLLING_INTERVAL\n              value: {{ .Values.conf.prometheus_openstack_exporter.OS_POLLING_INTERVAL | quote }}\n            - name: OS_RETRIES\n              value: {{ .Values.conf.prometheus_openstack_exporter.OS_RETRIES | quote }}\n            - name: TIMEOUT_SECONDS\n              value: {{ .Values.conf.prometheus_openstack_exporter.TIMEOUT_SECONDS | quote }}\n            - name: OS_IDENTITY_API_VERSION\n              value: \"3\"\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: clouds-yaml\n              mountPath: /etc/openstack\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.identity.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n          workingDir: /tmp\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: clouds-yaml\n          emptyDir: {}\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" 
.Values.secrets.tls.identity.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-openstack-exporter/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "prometheus-openstack-exporter/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"prometheus-openstack-exporter\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-openstack-exporter/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"prometheus-openstack-exporter-ks-user\" }}\n{{ tuple $envAll \"ks_user\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: prometheus-openstack-exporter-ks-user\n  labels:\n{{ tuple $envAll \"prometheus-openstack-exporter\" \"ks-user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"prometheus-openstack-exporter\" \"ks-user\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" \"prometheus-openstack-exporter-ks-user\" \"containerNames\" (list \"prometheus-openstack-exporter-ks-user\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"ks_user\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"ks_user\" list | include 
\"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: prometheus-openstack-exporter-ks-user\n{{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"ks_user\" \"container\" \"prometheus_openstack_exporter_ks_user\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/ks-user.sh\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ks-user-sh\n              mountPath: /tmp/ks-user.sh\n              subPath: ks-user.sh\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.identity.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n            - name: SERVICE_OS_SERVICE_NAME\n              value: \"prometheus-openstack-exporter\"\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.user }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 12 }}\n{{- end }}\n            - name: SERVICE_OS_ROLE\n              value: {{ .Values.endpoints.identity.auth.user.role | quote }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: ks-user-sh\n          configMap:\n            name: prometheus-openstack-exporter-bin\n            defaultMode: 0555\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.identity.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 
}}\n{{- end }}\n"
  },
  {
    "path": "prometheus-openstack-exporter/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License. */}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"prometheus-openstack-exporter\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "prometheus-openstack-exporter/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"user\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-openstack-exporter/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-openstack-exporter/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.openstack_exporter }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"prometheus_openstack_exporter\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  labels:\n{{ tuple $envAll \"prometheus-openstack-exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{- if .Values.monitoring.prometheus.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n{{- end }}\nspec:\n  ports:\n  - name: http\n    port: {{ tuple \"prometheus_openstack_exporter\" \"internal\" \"exporter\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    targetPort: {{ tuple \"prometheus_openstack_exporter\" \"internal\" \"exporter\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"prometheus-openstack-exporter\" \"exporter\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-openstack-exporter/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for prometheus-openstack-exporter.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nimages:\n  tags:\n    prometheus_openstack_exporter: ghcr.io/openstack-exporter/openstack-exporter:1.7.0\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  openstack_exporter:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\npod:\n  security_context:\n    exporter:\n      pod:\n        runAsUser: 65534\n      container:\n        openstack_metrics_exporter:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    ks_user:\n      pod:\n        runAsUser: 65534\n      container:\n        prometheus_openstack_exporter_ks_user:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    clouds_yaml_gen:\n      container:\n        prometheus_openstack_exporter_ks_user:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        
default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n  mounts:\n    prometheus_openstack_exporter:\n      prometheus_openstack_exporter:\n      init_container: null\n  replicas:\n    prometheus_openstack_exporter: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    termination_grace_period:\n      prometheus_openstack_exporter:\n        timeout: 30\n  resources:\n    enabled: false\n    prometheus_openstack_exporter:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    clouds_yaml_gen:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - prometheus-openstack-exporter-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    prometheus_openstack_exporter:\n      jobs:\n        - prometheus-openstack-exporter-ks-user\n      services:\n        - endpoint: internal\n          service: identity\n\nconf:\n  prometheus_openstack_exporter:\n    OS_POLLING_INTERVAL: 30\n    TIMEOUT_SECONDS: 20\n   
 OS_RETRIES: 1\n\nsecrets:\n  identity:\n    admin: prometheus-openstack-exporter-keystone-admin\n    user: prometheus-openstack-exporter-keystone-user\n  oci_image_registry:\n    prometheus-openstack-exporter: prometheus-openstack-exporter-oci-image-registry-key\n  tls:\n    identity:\n      api:\n        # This name should be same as in keystone. Keystone\n        # secret will be used in these charts\n        #\n        internal: keystone-tls-api\n\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      prometheus-openstack-exporter:\n        username: prometheus-openstack-exporter\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  prometheus_openstack_exporter:\n    namespace: null\n    hosts:\n      default: openstack-metrics\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      exporter:\n        default: 9180\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      user:\n        role: admin\n        region_name: RegionOne\n        username: prometheus-openstack-exporter\n        password: password\n        project_name: service\n        user_domain_name: default\n        project_domain_name: default\n    hosts:\n      default: keystone\n      internal: keystone-api\n    
host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 80\n        internal: 5000\n\nmonitoring:\n  prometheus:\n    enabled: true\n    openstack_exporter:\n      scrape: true\n\nnetwork:\n  openstack_metrics_exporter:\n    port: 9180\n\nnetwork_policy:\n  prometheus-openstack-exporter:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  deployment: true\n  job_image_repo_sync: true\n  job_ks_user: true\n  network_policy: false\n  secret_keystone: true\n  secret_registry: true\n  service: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "prometheus-process-exporter/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v0.2.11\ndescription: OpenStack-Helm Process Exporter for Prometheus\nname: prometheus-process-exporter\nversion: 2025.2.0\nhome: https://opendev.org/openstack/openstack-helm\nsources:\n  - https://github.com/ncabatoff/process-exporter\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "prometheus-process-exporter/templates/daemonset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.daemonset }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := printf \"%s-%s\" .Release.Name \"process-exporter\" }}\n{{ tuple $envAll \"process_exporter\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: run-process-exporter\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: cluster-admin\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: process-exporter\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"process_exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"process_exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"process_exporter\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"process_exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ dict \"envAll\" $envAll \"podName\" 
\"process-exporter\" \"containerNames\" (list \"process-exporter\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ if .Values.pod.tolerations.process_exporter.enabled }}\n{{ tuple $envAll \"process_exporter\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ else }}\n      nodeSelector:\n        {{ .Values.labels.process_exporter.node_selector_key }}: {{ .Values.labels.process_exporter.node_selector_value }}\n{{ end }}\n      hostNetwork: true\n      hostPID: true\n      initContainers:\n{{ tuple $envAll \"process_exporter\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: process-exporter\n{{ tuple $envAll \"process_exporter\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.process_exporter | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"metrics\" \"container\" \"process_exporter\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          args:\n{{- if hasKey .Values.conf \"children\" }}\n            - -children={{ .Values.conf.children }}\n{{- end }}\n            - -procnames\n            - {{ .Values.conf.processes }}\n          ports:\n            - name: metrics\n              containerPort: {{ tuple \"process_exporter_metrics\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              hostPort: {{ tuple \"process_exporter_metrics\" \"internal\" \"metrics\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            tcpSocket:\n              port: {{ tuple \"process_exporter_metrics\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            initialDelaySeconds: 20\n            periodSeconds: 10\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: proc\n              mountPath: /host/proc\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: proc\n          hostPath:\n            path: /proc\n{{- end }}\n"
  },
  {
    "path": "prometheus-process-exporter/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "prometheus-process-exporter/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"process-exporter\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-process-exporter/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License. */}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"process_exporter\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "prometheus-process-exporter/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-process-exporter/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.process_exporter }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"process_exporter_metrics\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  labels:\n{{ tuple $envAll \"process_exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{- if .Values.monitoring.prometheus.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n{{- end }}\nspec:\n  type: ClusterIP\n  clusterIP: None\n  ports:\n  - name: metrics\n    port: {{ tuple \"process_exporter_metrics\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    targetPort: {{ tuple \"process_exporter_metrics\" \"internal\" \"metrics\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"process_exporter\" \"metrics\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "prometheus-process-exporter/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for process-exporter.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nimages:\n  tags:\n    process_exporter: docker.io/ncabatoff/process-exporter:0.2.11\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nlabels:\n  process_exporter:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\npod:\n  security_context:\n    metrics:\n      pod:\n        runAsUser: 65534\n      container:\n        process_exporter:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  mounts:\n    process_exporter:\n      process_exporter:\n      init_container: null\n  lifecycle:\n    upgrades:\n      daemonsets:\n        pod_replacement_strategy: RollingUpdate\n        process_exporter:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n    termination_grace_period:\n     
 process_exporter:\n        timeout: 30\n  resources:\n    enabled: false\n    process_exporter:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  tolerations:\n    process_exporter:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n      - key: node-role.kubernetes.io/node\n        operator: Exists\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - process-exporter-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    process_exporter:\n      services: null\n\nmonitoring:\n  prometheus:\n    enabled: true\n    process_exporter:\n      scrape: true\n\nsecrets:\n  oci_image_registry:\n    prometheus-process-exporter: prometheus-process-exporter-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      prometheus-process-exporter:\n        username: prometheus-process-exporter\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: 
null\n  process_exporter_metrics:\n    namespace: null\n    hosts:\n      default: process-exporter\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      metrics:\n        default: 9256\n\nnetwork_policy:\n  process_exporter:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nmanifests:\n  configmap_bin: true\n  daemonset: true\n  job_image_repo_sync: true\n  secret_registry: true\n  service: true\n\nconf:\n  processes: dockerd,kubelet,kube-proxy,bgsagent,bgscollect,bgssd\n  children: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "rabbitmq/.helmignore",
    "content": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation (prefixed with !). Only one pattern per line.\n.DS_Store\n# Common VCS dirs\n.git/\n.gitignore\n.bzr/\n.bzrignore\n.hg/\n.hgignore\n.svn/\n# Common backup files\n*.swp\n*.bak\n*.tmp\n*~\n# Various IDEs\n.project\n.idea/\n*.tmproj\n\n\n"
  },
  {
    "path": "rabbitmq/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v3.12.0\ndescription: OpenStack-Helm RabbitMQ\nname: rabbitmq\nversion: 2025.2.0\nhome: https://github.com/rabbitmq/rabbitmq-server\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "rabbitmq/templates/bin/_rabbitmq-cookie.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\ncp -vf /run/lib/rabbitmq/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie\nchown \"rabbitmq\" /var/lib/rabbitmq/.erlang.cookie\nchmod 0600 /var/lib/rabbitmq/.erlang.cookie\n"
  },
  {
    "path": "rabbitmq/templates/bin/_rabbitmq-liveness.sh.tpl",
    "content": "#!/usr/bin/env bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -e\n\nif [ -f /tmp/rabbit-disable-liveness-probe ]; then\n   exit 0\nelse\n   exec rabbitmqctl ping\nfi\n"
  },
  {
    "path": "rabbitmq/templates/bin/_rabbitmq-password-hash.py.tpl",
    "content": "#!/usr/bin/env python3\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nSee here for explanation:\nhttp://lists.rabbitmq.com/pipermail/rabbitmq-discuss/2011-May/012765.html\n*/}}\n\nimport base64\nimport json\nimport os\nimport hashlib\nimport re\n\nuser = os.environ['RABBITMQ_ADMIN_USERNAME']\npassword = os.environ['RABBITMQ_ADMIN_PASSWORD']\nguest_password = os.environ['RABBITMQ_GUEST_PASSWORD']\noutput_file = os.environ['RABBITMQ_DEFINITION_FILE']\n\ndef hash_rabbit_password(password):\n    salt = os.urandom(4)\n    tmp0 = salt + password.encode('utf-8')\n    tmp1 = hashlib.sha512(tmp0).digest()\n    salted_hash = salt + tmp1\n    pass_hash = base64.b64encode(salted_hash)\n    return pass_hash.decode(\"utf-8\")\n\noutput = {\n    \"users\": [{\n        \"name\": user,\n        \"password_hash\": hash_rabbit_password(password),\n        \"hashing_algorithm\": \"rabbit_password_hashing_sha512\",\n        \"tags\": \"administrator\"\n    },\n    {\n        \"name\": \"guest\",\n        \"password_hash\": hash_rabbit_password(guest_password),\n        \"hashing_algorithm\": \"rabbit_password_hashing_sha512\",\n        \"tags\": \"administrator\"\n    }\n    ]\n}\n\nif 'RABBITMQ_USERS' in os.environ:\n    output.update({'vhosts': []})\n    output.update({'permissions': []})\n    users_creds = json.loads(os.environ['RABBITMQ_USERS'])\n    for user, creds in users_creds.items():\n        if 'auth' in creds:\n            for auth_key, auth_val in 
creds['auth'].items():\n                username = auth_val['username']\n                password = auth_val['password']\n                user_struct = {\n                    \"name\": username,\n                    \"password_hash\": hash_rabbit_password(password),\n                    \"hashing_algorithm\": \"rabbit_password_hashing_sha512\",\n                    \"tags\": \"\"\n                }\n                output['users'].append(user_struct)\n                if 'path' in creds:\n                    for path in (\n                        creds[\"path\"]\n                        if isinstance(creds[\"path\"], list)\n                        else [creds[\"path\"]]\n                    ):\n                        vhost = re.sub(\"^/\", \"\", path)\n                        vhost_struct = {\"name\": vhost}\n\n                        perm_struct = {\n                            \"user\": username,\n                            \"vhost\": vhost,\n                            \"configure\": \".*\",\n                            \"write\": \".*\",\n                            \"read\": \".*\"\n                        }\n\n                        output['vhosts'].append(vhost_struct)\n                        output['permissions'].append(perm_struct)\n\nif 'RABBITMQ_AUXILIARY_CONFIGURATION' in os.environ:\n    aux_conf = json.loads(os.environ['RABBITMQ_AUXILIARY_CONFIGURATION'])\n    if aux_conf.get('policies', []):\n        output['policies'] = aux_conf['policies']\n    if aux_conf.get('bindings', []):\n        output['bindings'] = aux_conf['bindings']\n    if aux_conf.get('queues', []):\n        output['queues'] = aux_conf['queues']\n    if aux_conf.get('exchanges', []):\n        output['exchanges'] = aux_conf['exchanges']\n\nwith open(output_file, 'w') as f:\n    f.write(json.dumps(output))\n    f.close()\n"
  },
  {
    "path": "rabbitmq/templates/bin/_rabbitmq-readiness.sh.tpl",
    "content": "#!/usr/bin/env bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -e\n\nif [ -f /tmp/rabbit-disable-readiness ]; then\n   exit 1\nelse\n   exec rabbitmqctl ping\nfi\n"
  },
  {
    "path": "rabbitmq/templates/bin/_rabbitmq-start.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nfunction check_if_open () {\n  HOST=$1\n  PORT=$2\n  timeout 10 bash -c \"true &>/dev/null </dev/tcp/${HOST}/${PORT}\"\n}\n\nfunction check_rabbit_node_health () {\n  CLUSTER_SEED_NAME=$1\n  rabbitmq-diagnostics node_health_check -n \"${CLUSTER_SEED_NAME}\" -t 10 &>/dev/null\n}\n\nget_node_name () {\n  TARGET_POD=$1\n  POD_NAME_PREFIX=\"$(echo \"${MY_POD_NAME}\" | awk 'BEGIN{FS=OFS=\"-\"}{NF--; print}')\"\n  echo \"${RABBITMQ_NODENAME}\" | awk -F \"@${MY_POD_NAME}.\" \"{ print \\$1 \\\"@${POD_NAME_PREFIX}-${TARGET_POD}.\\\" \\$2 }\"\n}\n\nfunction check_rabbit_node_ready () {\n  TARGET_POD=$1\n  CLUSTER_SEED_NAME=\"$(get_node_name ${TARGET_POD})\"\n  CLUSTER_SEED_HOST=\"$(echo \"${CLUSTER_SEED_NAME}\" | awk -F '@' '{ print $NF }')\"\n  check_rabbit_node_health \"${CLUSTER_SEED_NAME}\" && \\\n  check_if_open \"${CLUSTER_SEED_HOST}\" \"${PORT_HTTP}\" && \\\n  check_if_open \"${CLUSTER_SEED_HOST}\" \"${PORT_AMPQ}\" && \\\n  check_if_open \"${CLUSTER_SEED_HOST}\" \"${PORT_CLUSTERING}\"\n}\n\nPOD_INCREMENT=$(echo \"${MY_POD_NAME}\" | awk -F '-' '{print $NF}')\nif ! [ \"${POD_INCREMENT}\" -eq \"0\" ] && ! 
[ -d \"/var/lib/rabbitmq/mnesia\" ] ; then\n  echo 'This is not the 1st rabbit pod & has not been initialised'\n  # disable liveness probe as it may take some time for the pod to come online.\n  touch /tmp/rabbit-disable-liveness-probe\n  POD_NAME_PREFIX=\"$(echo \"${MY_POD_NAME}\" | awk 'BEGIN{FS=OFS=\"-\"}{NF--; print}')\"\n  for TARGET_POD in $(seq 0 +1 $((POD_INCREMENT - 1 ))); do\n    END=$(($(date +%s) + 900))\n    while ! check_rabbit_node_ready \"${TARGET_POD}\"; do\n      sleep 5\n      if [ \"$(date +%s)\" -gt \"$END\" ]; then\n        echo \"RabbitMQ pod ${TARGET_POD} not ready in time\"\n        exit 1\n      fi\n    done\n  done\n\n  function reset_rabbit () {\n    rabbitmqctl shutdown || true\n    find /var/lib/rabbitmq/* ! -name 'definitions.json' ! -name '.erlang.cookie' -exec rm -rf {} +\n    exit 1\n  }\n\n  # Start RabbitMQ, but disable readiness from being reported so the pod is not\n  # marked as up prematurely.\n  touch /tmp/rabbit-disable-readiness\n  rabbitmq-server &\n\n  # Wait for server to start, and reset if it does not\n  END=$(($(date +%s) + 180))\n  while ! rabbitmqctl -q cluster_status; do\n      sleep 5\n      NOW=$(date +%s)\n      [ $NOW -gt $END ] && reset_rabbit\n  done\n\n  # Wait for server to join cluster, reset if it does not\n  POD_INCREMENT=$(echo \"${MY_POD_NAME}\" | awk -F '-' '{print $NF}')\n  END=$(($(date +%s) + 180))\n  while ! rabbitmqctl -l --node $(get_node_name 0) -q cluster_status | grep -q \"$(get_node_name ${POD_INCREMENT})\"; do\n    sleep 5\n    NOW=$(date +%s)\n    [ $NOW -gt $END ] && reset_rabbit\n  done\n\n  # Shutdown the initial server\n  rabbitmqctl shutdown\n\n  rm -fv /tmp/rabbit-disable-readiness /tmp/rabbit-disable-liveness-probe\nfi\n\n{{- if .Values.forceBoot.enabled }}\nif [ \"${POD_INCREMENT}\" -eq \"0\" ] && [ -d \"/var/lib/rabbitmq/mnesia/${RABBITMQ_NODENAME}\" ]; then rabbitmqctl force_boot; fi\n{{- end}}\nexec rabbitmq-server\n"
  },
  {
    "path": "rabbitmq/templates/bin/_rabbitmq-test.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n# Extract connection details\nRABBIT_HOSTNAME=`echo $RABBITMQ_ADMIN_CONNECTION | awk -F'[@]' '{print $2}' \\\n  | awk -F'[:/]' '{print $1}'`\nRABBIT_PORT=`echo $RABBITMQ_ADMIN_CONNECTION | awk -F'[@]' '{print $2}' \\\n  | awk -F'[:/]' '{print $2}'`\n\nset +x\n# Extract Admin User credential\nRABBITMQ_ADMIN_USERNAME=`echo $RABBITMQ_ADMIN_CONNECTION | awk -F'[@]' '{print $1}' \\\n  | awk -F'[//:]' '{print $4}'`\nRABBITMQ_ADMIN_PASSWORD=`echo $RABBITMQ_ADMIN_CONNECTION | awk -F'[@]' '{print $1}' \\\n  | awk -F'[//:]' '{print $5}'`\nset -x\n\nfunction rabbitmqadmin_authed () {\n  set +x\n  rabbitmqadmin \\\n{{- if .Values.manifests.certificates }}\n    --ssl \\\n    --ssl-disable-hostname-verification \\\n    --ssl-ca-cert-file=\"/etc/rabbitmq/certs/ca.crt\" \\\n    --ssl-cert-file=\"/etc/rabbitmq/certs/tls.crt\" \\\n    --ssl-key-file=\"/etc/rabbitmq/certs/tls.key\" \\\n{{- end }}\n    --host=\"${RABBIT_HOSTNAME}\" \\\n    --port=\"${RABBIT_PORT}\" \\\n    --username=\"${RABBITMQ_ADMIN_USERNAME}\" \\\n    --password=\"${RABBITMQ_ADMIN_PASSWORD}\" \\\n    ${@}\n  set -x\n}\n\nfunction rabbit_check_node_count () {\n  echo \"Checking node count \"\n  NODES_IN_CLUSTER=$(rabbitmqadmin_authed list nodes -f bash | wc -w)\n  if [ \"$NODES_IN_CLUSTER\" -eq \"$RABBIT_REPLICA_COUNT\" ]; then\n    echo \"Number of nodes in cluster ($NODES_IN_CLUSTER) match number of desired pods 
($NODES_IN_CLUSTER)\"\n  else\n    echo \"Number of nodes in cluster ($NODES_IN_CLUSTER) does not match number of desired pods ($RABBIT_REPLICA_COUNT)\"\n    exit 1\n  fi\n}\n# Check node count\nrabbit_check_node_count\n\nfunction rabbit_find_partitions () {\n  NODE_INFO=$(mktemp)\n  rabbitmqadmin_authed list nodes -f pretty_json | tee \"${NODE_INFO}\"\n  cat \"${NODE_INFO}\" | python3 -c \"\nimport json, sys, traceback\nprint('Checking cluster partitions')\nobj=json.load(sys.stdin)\nfor num, node in enumerate(obj):\n  try:\n    partition = node['partitions']\n    if partition:\n      raise Exception('cluster partition found: %s' % partition)\n  except KeyError:\n    print('Error: partition key not found for node %s' % node)\nprint('No cluster partitions found')\n  \"\n  rm -vf \"${NODE_INFO}\"\n}\nrabbit_find_partitions\n\nfunction rabbit_check_users_match () {\n  echo \"Checking users match on all nodes\"\n  NODES=$(rabbitmqadmin_authed list nodes -f bash)\n  USER_LIST=$(mktemp --directory)\n  echo \"Found the following nodes: ${NODES}\"\n  for NODE in ${NODES}; do\n    echo \"Checking Node: ${NODE#*@}\"\n    rabbitmqadmin_authed list users -f bash > ${USER_LIST}/${NODE#*@}\n  done\n  cd ${USER_LIST}; diff -q --from-file $(ls ${USER_LIST})\n  echo \"User lists match for all nodes\"\n}\n# Check users match on all nodes\nrabbit_check_users_match\n"
  },
  {
    "path": "rabbitmq/templates/bin/_rabbitmq-wait-for-cluster.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -e\n\n# Extract connection details\nRABBIT_HOSTNAME=`echo $RABBITMQ_ADMIN_CONNECTION | awk -F'[@]' '{print $2}' \\\n  | awk -F'[:/]' '{print $1}'`\nRABBIT_PORT=`echo $RABBITMQ_ADMIN_CONNECTION | awk -F'[@]' '{print $2}' \\\n  | awk -F'[:/]' '{print $2}'`\n\n# Extract Admin User creadential\nRABBITMQ_ADMIN_USERNAME=`echo $RABBITMQ_ADMIN_CONNECTION | awk -F'[@]' '{print $1}' \\\n  | awk -F'[//:]' '{print $4}'`\nRABBITMQ_ADMIN_PASSWORD=`echo $RABBITMQ_ADMIN_CONNECTION | awk -F'[@]' '{print $1}' \\\n  | awk -F'[//:]' '{print $5}'`\n\nset -ex\n\nfunction rabbitmqadmin_authed () {\n  set +x\n  rabbitmqadmin \\\n{{- if .Values.manifests.certificates }}\n    --ssl \\\n    --ssl-disable-hostname-verification \\\n    --ssl-ca-cert-file=\"/etc/rabbitmq/certs/ca.crt\" \\\n    --ssl-cert-file=\"/etc/rabbitmq/certs/tls.crt\" \\\n    --ssl-key-file=\"/etc/rabbitmq/certs/tls.key\" \\\n{{- end }}\n    --host=\"${RABBIT_HOSTNAME}\" \\\n    --port=\"${RABBIT_PORT}\" \\\n    --username=\"${RABBITMQ_ADMIN_USERNAME}\" \\\n    --password=\"${RABBITMQ_ADMIN_PASSWORD}\" \\\n    ${@}\n  set -x\n}\n\nfunction active_rabbit_nodes () {\n  rabbitmqadmin_authed list nodes -f bash | wc -w\n}\n\nuntil test \"$(active_rabbit_nodes)\" -ge \"$RABBIT_REPLICA_COUNT\"; do\n    echo \"Waiting for number of nodes in cluster to meet or exceed number of desired pods ($RABBIT_REPLICA_COUNT)\"\n    sleep 10\ndone\n\nfunction 
sorted_node_list () {\n  rabbitmqadmin_authed list nodes -f bash | tr ' ' '\\n' | sort | tr '\\n' ' '\n}\n\nif test \"$(active_rabbit_nodes)\" -gt \"$RABBIT_REPLICA_COUNT\"; then\n    echo \"There are more nodes registered in the cluster than desired, pruning the cluster\"\n    PRIMARY_NODE=\"$(sorted_node_list | awk '{ print $1; exit }')\"\n    until rabbitmqctl -l -n \"${PRIMARY_NODE}\" cluster_status >/dev/null 2>&1 ; do\n      echo \"Waiting for primary node to return cluster status\"\n      sleep 10\n    done\n    echo \"Current cluster:\"\n    rabbitmqctl -l -n \"${PRIMARY_NODE}\" cluster_status\n    NODES_TO_REMOVE=\"$(sorted_node_list | awk \"{print substr(\\$0, index(\\$0,\\$$((RABBIT_REPLICA_COUNT+1))))}\")\"\n    for NODE in ${NODES_TO_REMOVE}; do\n      rabbitmqctl -l -n \"${NODE}\" stop_app || true\n      rabbitmqctl -l -n \"${PRIMARY_NODE}\" forget_cluster_node \"${NODE}\"\n    done\n    echo \"Updated cluster:\"\n    rabbitmqctl -l -n \"${PRIMARY_NODE}\" cluster_status\nfi\n"
  },
  {
    "path": "rabbitmq/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.certificates -}}\n{{ dict \"envAll\" . \"service\" \"oslo_messaging\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "rabbitmq/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ printf \"%s-%s\" .deployment_name \"rabbitmq-bin\" | quote }}\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  rabbitmq-test.sh: |\n{{ tuple \"bin/_rabbitmq-test.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbitmq-liveness.sh: |\n{{ tuple \"bin/_rabbitmq-liveness.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbitmq-readiness.sh: |\n{{ tuple \"bin/_rabbitmq-readiness.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbitmq-start.sh: |\n{{ tuple \"bin/_rabbitmq-start.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbitmq-cookie.sh: |\n{{ tuple \"bin/_rabbitmq-cookie.sh.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbitmq-password-hash.py: |\n{{ tuple \"bin/_rabbitmq-password-hash.py.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbitmq-wait-for-cluster.sh: |\n{{ tuple \"bin/_rabbitmq-wait-for-cluster.sh.tpl\" . 
| include  \"helm-toolkit.utils.template\" | indent 4 }}\n{{ end }}\n"
  },
  {
    "path": "rabbitmq/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{/*\n(aostapenko) rounds cpu limit in any permissible format to integer value (min 1)\n\"100m\"   -> 1\n\"1100m\"  -> 1\n\"10900m\" -> 10\n0.3      -> 1\n5.4      -> 5\n*/}}\n{{- define \"get_erlvm_scheduler_num\" -}}\n{{- $val := . | toString -}}\n{{- if regexMatch \"^[0-9]*m$\" $val -}}\n{{- $val = div (float64 (trimSuffix \"m\" $val)) 1000 -}}\n{{- end -}}\n{{/* NOTE(aostapenko) String with floating number does not convert well to int*/}}\n{{- $val | float64 | int | default 1 -}}\n{{- end -}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty $envAll.Values.conf.rabbitmq.cluster_formation.k8s.host -}}\n{{- $_ := print \"kubernetes.default.svc.\" $envAll.Values.endpoints.cluster_domain_suffix | set $envAll.Values.conf.rabbitmq.cluster_formation.k8s \"host\" -}}\n{{- end -}}\n\n{{- if .Values.manifests.certificates }}\n{{- $_ := print \"none\" | set $envAll.Values.conf.rabbitmq.listeners \"tcp\" -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"amqp\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set $envAll.Values.conf.rabbitmq.listeners \"ssl.1\" -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"https\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set $envAll.Values.conf.rabbitmq \"management.ssl.port\" -}}\n{{- else }}\n{{- $_ := print $envAll.Values.conf.bind_address \":\" ( tuple \"oslo_messaging\" \"internal\" \"amqp\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\") | set $envAll.Values.conf.rabbitmq.listeners.tcp \"1\" -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"http\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set $envAll.Values.conf.rabbit_additonal_conf \"management.listener.port\" -}}\n{{- end }}\n\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.deployment_name \"rabbitmq-etc\" | quote }}\ndata:\n  enabled_plugins: |\n{{ tuple \"etc/_enabled_plugins.tpl\" . | include  \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbitmq.conf: |\n{{ include \"rabbitmq.utils.to_rabbit_config\" $envAll.Values.conf.rabbitmq | indent 4 }}\n{{- if not .Values.manifests.certificates }}\n{{ include \"rabbitmq.utils.to_rabbit_config\" $envAll.Values.conf.rabbit_additonal_conf | indent 4 }}\n{{- end }}\n\n{{- if .Values.conf.rabbit_advanced_config.enabled }}\n  advanced.config: |\n    [\n    {rabbit, [\n          {default_consumer_prefetch, {false,{{ .Values.conf.rabbit_advanced_config.default_consumer_prefetch }}}}\n        ]\n    }\n    ].\n{{- end }}\n\n{{- $erlvm_scheduler_num := include \"get_erlvm_scheduler_num\" .Values.pod.resources.server.limits.cpu }}\n{{- $erlvm_scheduler_conf := printf \"+S %s:%s\" $erlvm_scheduler_num $erlvm_scheduler_num }}\n{{- if .Values.manifests.config_ipv6 }}\n  rabbitmq-env.conf: |\n    SERVER_ADDITIONAL_ERL_ARGS={{ printf \"+A 128 -kernel inetrc '/etc/rabbitmq/erl_inetrc' -proto_dist inet6_tcp %s\" $erlvm_scheduler_conf | quote }}\n    CTL_ERL_ARGS=\"-proto_dist inet6_tcp\"\n  erl_inetrc: |\n    {inet6, true}.\n{{- else }}\n  rabbitmq-env.conf: |\n    SERVER_ADDITIONAL_ERL_ARGS={{ $erlvm_scheduler_conf | quote }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "rabbitmq/templates/etc/_enabled_plugins.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n[{{ include \"helm-toolkit.utils.joinListWithComma\" .Values.conf.enabled_plugins }}].\n"
  },
  {
    "path": "rabbitmq/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "rabbitmq/templates/ingress-management.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if and .Values.manifests.ingress_management .Values.network.management.ingress.public }}\n{{- $envAll := . }}\n{{- if empty $envAll.Values.endpoints.oslo_messaging.hosts.public }}\n{{- $service_public_name := .deployment_name | trunc 12 }}\n{{- $_ := set $envAll.Values.endpoints.oslo_messaging.hosts \"public\" ( printf \"%s-%s-%s\" $service_public_name \"mgr\" ( $service_public_name | sha256sum | trunc 6 )) }}\n{{- end }}\n{{- $ingressOpts := dict \"envAll\" . \"backendService\" \"management\" \"backendServiceType\" \"oslo_messaging\" \"backendPort\" \"http\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "rabbitmq/templates/job-cluster-wait.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.job_cluster_wait }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := print .deployment_name \"-cluster-wait\" }}\n{{ tuple $envAll \"cluster_wait\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $protocol := \"http\" }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- $protocol = \"https\" }}\n{{- end }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: \"{{.deployment_name}}-cluster-wait\"\n  labels:\n{{ tuple $envAll \"rabbitmq\" \"cluster-wait\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n    \"helm.sh/hook\": \"post-install,post-upgrade\"\n    \"helm.sh/hook-weight\": \"5\"\n    \"helm.sh/hook-delete-policy\": before-hook-creation\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"rabbitmq\" \"cluster-wait\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"rabbitmq-cluster-wait\" \"containerNames\" (list \"init\" \"rabbitmq-cookie\" \"rabbitmq-rabbitmq-cluster-wait\" ) | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"cluster_wait\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.rabbitmq.enabled }}\n{{ tuple $envAll \"rabbitmq\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ $envAll.Values.labels.jobs.node_selector_key }}: {{ $envAll.Values.labels.test.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"cluster_wait\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: rabbitmq-cookie\n{{ tuple $envAll \"scripted_test\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cluster_wait\" \"container\" \"rabbitmq_cookie\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/rabbitmq-cookie.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: rabbitmq-bin\n              mountPath: /tmp/rabbitmq-cookie.sh\n              subPath: rabbitmq-cookie.sh\n              readOnly: true\n            - name: rabbitmq-data\n              mountPath: /var/lib/rabbitmq\n            - name: rabbitmq-erlang-cookie\n              mountPath: /var/run/lib/rabbitmq/.erlang.cookie\n              subPath: erlang_cookie\n              readOnly: true\n      containers:\n        - 
name: rabbitmq-rabbitmq-cluster-wait\n{{ tuple $envAll \"scripted_test\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"cluster_wait\" \"container\" \"rabbitmq_cluster_wait\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: RABBITMQ_ADMIN_CONNECTION\n              value: {{ tuple \"oslo_messaging\" \"internal\" \"user\" $protocol $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | quote }}\n            - name: RABBIT_REPLICA_COUNT\n              value: {{ $envAll.Values.pod.replicas.server | quote }}\n          command:\n            - /tmp/rabbitmq-wait-for-cluster.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: rabbitmq-bin\n              mountPath: /tmp/rabbitmq-wait-for-cluster.sh\n              subPath: rabbitmq-wait-for-cluster.sh\n              readOnly: true\n            - name: rabbitmq-data\n              mountPath: /var/lib/rabbitmq\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_messaging.server.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: rabbitmq-data\n          emptyDir: {}\n        - name: rabbitmq-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.deployment_name \"rabbitmq-bin\" | quote }}\n            defaultMode: 0555\n        - name: rabbitmq-erlang-cookie\n          secret:\n            secretName: {{ printf \"%s-%s\" $envAll.deployment_name \"erlang-cookie\" | quote }}\n            defaultMode: 0444\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_messaging.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "rabbitmq/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"rabbitmq\" -}}\n{{- if .Values.pod.tolerations.rabbitmq.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "rabbitmq/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"rabbitmq\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "rabbitmq/templates/pod-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.pod_test }}\n{{- $envAll := . }}\n\n{{ if kindIs \"string\" $envAll.Values.dependencies.static.tests.jobs }}\n{{ if eq $envAll.Values.dependencies.static.tests.jobs \"cluster_wait\" }}\n{{ $_ := set $envAll.Values.dependencies.static.tests \"jobs\" ( list ( print $envAll.deployment_name \"-cluster-wait\" ) ) }}\n{{ end }}\n{{ end }}\n\n{{- $serviceAccountName := print .deployment_name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $protocol := \"http\" }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- $protocol = \"https\" }}\n{{- end }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{.deployment_name}}-test\"\n  labels:\n{{ tuple $envAll \"rabbitmq\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n    \"helm.sh/hook\": test-success\n{{ dict \"envAll\" $envAll \"podName\" \"rabbitmq-rabbitmq-test\" \"containerNames\" (list \"init\" \"rabbitmq-rabbitmq-test\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n{{ 
dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.rabbitmq.enabled }}\n{{ tuple $envAll \"rabbitmq\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  nodeSelector:\n    {{ $envAll.Values.labels.test.node_selector_key }}: {{ $envAll.Values.labels.test.node_selector_value | quote }}\n  restartPolicy: Never\n  initContainers:\n{{ tuple $envAll \"tests\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: rabbitmq-rabbitmq-test\n{{ tuple $envAll \"scripted_test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"rabbitmq_test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      env:\n        - name: RABBITMQ_ADMIN_CONNECTION\n          value: {{ tuple \"oslo_messaging\" \"internal\" \"user\" $protocol $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | quote }}\n        - name: RABBIT_REPLICA_COUNT\n          value: {{ $envAll.Values.pod.replicas.server | quote }}\n      command:\n        - /tmp/rabbitmq-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: rabbitmq-bin\n          mountPath: /tmp/rabbitmq-test.sh\n          subPath: rabbitmq-test.sh\n          readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.oslo_messaging.server.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 8 }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: rabbitmq-bin\n      configMap:\n        name: {{ printf \"%s-%s\" $envAll.deployment_name \"rabbitmq-bin\" | quote }}\n        defaultMode: 0555\n{{- dict \"enabled\" 
.Values.manifests.certificates \"name\" .Values.secrets.tls.oslo_messaging.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "rabbitmq/templates/secret-erlang-cookie.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.secret_erlang_cookie }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.deployment_name \"erlang-cookie\" | quote }}\ntype: Opaque\ndata:\n  erlang_cookie: {{ $envAll.Values.endpoints.oslo_messaging.auth.erlang_cookie | b64enc -}}\n{{- end }}\n"
  },
  {
    "path": "rabbitmq/templates/secret-rabbit-admin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if .Values.manifests.secret_admin_user }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.deployment_name \"admin-user\" | quote }}\ntype: Opaque\ndata:\n  RABBITMQ_ADMIN_USERNAME: {{ $envAll.Values.endpoints.oslo_messaging.auth.user.username | b64enc }}\n  RABBITMQ_ADMIN_PASSWORD: {{ $envAll.Values.endpoints.oslo_messaging.auth.user.password | b64enc }}\n  RABBITMQ_GUEST_PASSWORD: {{ $envAll.Values.endpoints.oslo_messaging.auth.guest.password | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "rabbitmq/templates/secret-rabbitmq-users-credentials.yaml",
    "content": "{{/*\nCopyright 2019 Mirantis Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.conf.users }}\n\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ printf \"%s-%s\" $envAll.deployment_name \"users-credentials\" | quote }}\n  labels:\n{{ tuple $envAll \"rabbitmq\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_USERS: {{ toJson .Values.conf.users | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "rabbitmq/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "rabbitmq/templates/service-ingress-management.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- if and .Values.manifests.service_ingress_management .Values.network.management.ingress.public }}\n{{- $envAll := . }}\n{{- if empty $envAll.Values.endpoints.oslo_messaging.hosts.public }}\n{{- $service_public_name := .deployment_name | trunc 12 }}\n{{- $_ := set $envAll.Values.endpoints.oslo_messaging.hosts \"public\" ( printf \"%s-%s-%s\" $service_public_name \"mgr\" ( $service_public_name | sha256sum | trunc 6 )) }}\n{{- end }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendService\" \"management\" \"backendServiceType\" \"oslo_messaging\" \"backendPort\" \"http\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "rabbitmq/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.rabbitmq }}\n{{- $protocol := \"http\" }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- $protocol = \"https\" }}\n{{- end }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"oslo_messaging\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  labels:\n{{ tuple $envAll \"rabbitmq\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{- if $envAll.Values.monitoring.prometheus.enabled }}\n{{ tuple $prometheus_annotations | include \"helm-toolkit.snippets.prometheus_service_annotations\" | indent 4 }}\n{{- end }}\nspec:\n  clusterIP: None\n  ports:\n    - port: {{ tuple \"oslo_messaging\" \"internal\" \"amqp\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      name: amqp\n    - port: {{ add (tuple \"oslo_messaging\" \"internal\" \"amqp\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\") 20000 }}\n      name: clustering\n    - port: {{ tuple \"oslo_messaging\" \"internal\" $protocol . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      name: {{  printf \"%s\" $protocol }}\n    - name: metrics\n      port: {{ tuple \"oslo_messaging\" \"internal\" \"metrics\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n  selector:\n{{ tuple $envAll \"rabbitmq\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{ end }}\n"
  },
  {
    "path": "rabbitmq/templates/statefulset.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- define \"rabbitmqReadinessProbeTemplate\" }}\nexec:\n  command:\n    - /tmp/rabbitmq-readiness.sh\n{{- end }}\n{{- define \"rabbitmqLivenessProbeTemplate\" }}\nexec:\n  command:\n    - /tmp/rabbitmq-liveness.sh\n{{- end }}\n\n{{/*\n(aostapenko) rounds cpu limit in any permissible format to integer value (min 1)\n\"100m\"   -> 1\n\"1100m\"  -> 1\n\"10900m\" -> 10\n0.3      -> 1\n5.4      -> 5\n*/}}\n{{- define \"get_erlvm_scheduler_num\" -}}\n{{- $val := . | toString -}}\n{{- if regexMatch \"^[0-9]*m$\" $val -}}\n{{- $val = div (float64 (trimSuffix \"m\" $val)) 1000 -}}\n{{- end -}}\n{{/* NOTE(aostapenko) String with floating number does not convert well to int */}}\n{{- $val | float64 | int | default 1 -}}\n{{- end -}}\n\n{{- if .Values.manifests.statefulset }}\n{{- $envAll := . 
}}\n\n{{- $rcControllerName := printf \"%s-%s\" $envAll.deployment_name \"rabbitmq\" }}\n{{ tuple $envAll \"rabbitmq\" $rcControllerName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $protocol := \"http\" }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- $protocol = \"https\" }}\n{{- end }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ $rcControllerName | quote }}\n  namespace: {{ .Release.Namespace }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ $rcControllerName | quote }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ $rcControllerName | quote }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ $rcControllerName | quote }}\n  namespace: {{ .Release.Namespace }}\nrules:\n  - apiGroups:\n      - \"\"\n      - extensions\n      - batch\n      - apps\n    verbs:\n      - get\n      - list\n    resources:\n      - services\n      - endpoints\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: {{ $rcControllerName | quote }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"rabbitmq\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: {{ tuple \"oslo_messaging\" \"internal\" . 
| include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  replicas: {{ $envAll.Values.pod.replicas.server }}\n  podManagementPolicy: \"Parallel\"\n  selector:\n    matchLabels:\n{{ tuple $envAll \"rabbitmq\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"rabbitmq\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        secret-rabbit-admin-hash: {{ tuple \"secret-rabbit-admin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        secret-erlang-cookie-hash: {{ tuple \"secret-erlang-cookie.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"rabbitmq\" \"containerNames\" (list \"init\" \"rabbitmq-password\" \"rabbitmq-cookie\" \"rabbitmq-perms\" \"rabbitmq\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $rcControllerName | quote }}\n      affinity:\n{{ tuple $envAll \"rabbitmq\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ if $envAll.Values.pod.tolerations.rabbitmq.enabled }}\n{{ tuple $envAll \"rabbitmq\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ $envAll.Values.labels.server.node_selector_key }}: {{ $envAll.Values.labels.server.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"rabbitmq\" list 
| include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: rabbitmq-password\n{{ tuple $envAll \"rabbitmq_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"rabbitmq_password\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/rabbitmq-password-hash.py\n          env:\n            - name: RABBITMQ_ADMIN_USERNAME\n              valueFrom:\n                secretKeyRef:\n                  name: {{ printf \"%s-%s\" $envAll.deployment_name \"admin-user\" | quote }}\n                  key: RABBITMQ_ADMIN_USERNAME\n            - name: RABBITMQ_ADMIN_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ printf \"%s-%s\" $envAll.deployment_name \"admin-user\" | quote }}\n                  key: RABBITMQ_ADMIN_PASSWORD\n            - name: RABBITMQ_GUEST_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: {{ printf \"%s-%s\" $envAll.deployment_name \"admin-user\" | quote }}\n                  key: RABBITMQ_GUEST_PASSWORD\n            - name: RABBITMQ_DEFINITION_FILE\n              value: \"{{ index $envAll.Values.conf.rabbitmq \"management.load_definitions\" }}\"\n{{- if .Values.conf.users }}\n            - name: RABBITMQ_USERS\n              valueFrom:\n                secretKeyRef:\n                  name: {{ printf \"%s-%s\" $envAll.deployment_name \"users-credentials\" | quote }}\n                  key: RABBITMQ_USERS\n{{- end }}\n{{- if .Values.conf.aux_conf }}\n            - name: RABBITMQ_AUXILIARY_CONFIGURATION\n              value: {{ toJson $envAll.Values.conf.aux_conf | quote }}\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              
mountPath: /tmp\n            - name: rabbitmq-data\n              mountPath: /var/lib/rabbitmq\n            - name: rabbitmq-bin\n              mountPath: /tmp/rabbitmq-password-hash.py\n              subPath: rabbitmq-password-hash.py\n              readOnly: true\n        - name: rabbitmq-cookie\n{{ tuple $envAll \"rabbitmq\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"rabbitmq_cookie\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/rabbitmq-cookie.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: rabbitmq-bin\n              mountPath: /tmp/rabbitmq-cookie.sh\n              subPath: rabbitmq-cookie.sh\n              readOnly: true\n            - name: rabbitmq-data\n              mountPath: /var/lib/rabbitmq\n            - name: rabbitmq-erlang-cookie\n              mountPath: /var/run/lib/rabbitmq/.erlang.cookie\n              subPath: erlang_cookie\n              readOnly: true\n{{- if $envAll.Values.volume.chown_on_start }}\n        - name: rabbitmq-perms\n{{ tuple $envAll \"rabbitmq\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"rabbitmq_perms\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - chown\n            - -R\n            - \"{{ $envAll.Values.pod.security_context.server.pod.runAsUser }}\"\n            - /var/lib/rabbitmq\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: 
rabbitmq-data\n              mountPath: /var/lib/rabbitmq\n{{- end }}\n      containers:\n        - name: rabbitmq\n{{ tuple $envAll \"rabbitmq\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"server\" \"container\" \"rabbitmq\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/rabbitmq-start.sh\n          ports:\n            - name: {{  printf \"%s\" $protocol }}\n              protocol: TCP\n              containerPort: {{ tuple \"oslo_messaging\" \"internal\" $protocol . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            {{- if .Values.network.host_namespace }}\n              hostPort: {{ tuple \"oslo_messaging\" \"internal\" $protocol . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            {{- end }}\n            - name: amqp\n              protocol: TCP\n              containerPort: {{ tuple \"oslo_messaging\" \"internal\" \"amqp\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            {{- if .Values.network.host_namespace }}\n              hostPort: {{ tuple \"oslo_messaging\" \"internal\" \"amqp\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n            {{- end }}\n            - name: clustering\n              protocol: TCP\n              containerPort: {{ add (tuple \"oslo_messaging\" \"internal\" \"amqp\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\") 20000 }}\n            {{- if .Values.network.host_namespace }}\n              hostPort: {{ add (tuple \"oslo_messaging\" \"internal\" \"amqp\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\") 20000 }}\n            {{- end }}\n            - name: metrics\n              containerPort: {{ tuple \"oslo_messaging\" \"internal\" \"metrics\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n              protocol: TCP\n          env:\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  fieldPath: status.podIP\n            - name: RABBITMQ_USE_LONGNAME\n              value: \"true\"\n            - name: RABBITMQ_NODENAME\n              value: \"rabbit@$(MY_POD_NAME).{{ tuple \"oslo_messaging\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\"\n            - name: K8S_SERVICE_NAME\n              value: {{ tuple \"oslo_messaging\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n            - name: K8S_HOSTNAME_SUFFIX\n              value: \".{{ tuple \"oslo_messaging\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\"\n            - name: RABBITMQ_ERLANG_COOKIE\n              value: \"{{ $envAll.Values.endpoints.oslo_messaging.auth.erlang_cookie }}\"\n            - name: PORT_HTTP\n              value: \"{{ tuple \"oslo_messaging\" \"internal\" $protocol . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\"\n            - name: PORT_AMPQ\n              value: \"{{ tuple \"oslo_messaging\" \"internal\" \"amqp\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\"\n            - name: PORT_CLUSTERING\n              value: \"{{ add (tuple \"oslo_messaging\" \"internal\" \"amqp\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\") 20000 }}\"\n{{- if ne (.Values.conf.feature_flags | default \"\") \"default\" }}\n            - name: RABBITMQ_FEATURE_FLAGS\n              value: \"{{ .Values.conf.feature_flags }}\"\n{{- end }}\n{{ dict \"envAll\" $envAll \"component\" \"rabbitmq\" \"container\" \"rabbitmq\" \"type\" \"readiness\" \"probeTemplate\" (include \"rabbitmqReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"rabbitmq\" \"container\" \"rabbitmq\" \"type\" \"liveness\" \"probeTemplate\" (include \"rabbitmqLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | trim | indent 10 }}\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - rabbitmqctl\n                  - stop_app\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: rabbitmq-data\n              mountPath: /var/lib/rabbitmq\n            - name: rabbitmq-bin\n              mountPath: /tmp/rabbitmq-start.sh\n              subPath: rabbitmq-start.sh\n              readOnly: true\n            - name: rabbitmq-bin\n              mountPath: /tmp/rabbitmq-readiness.sh\n              subPath: rabbitmq-readiness.sh\n              readOnly: true\n            - name: rabbitmq-bin\n              mountPath: /tmp/rabbitmq-liveness.sh\n              subPath: rabbitmq-liveness.sh\n              readOnly: true\n            - name: rabbitmq-etc\n              mountPath: /etc/rabbitmq/enabled_plugins\n              subPath: enabled_plugins\n              readOnly: true\n            - name: rabbitmq-etc\n              mountPath: /etc/rabbitmq/rabbitmq.conf\n              subPath: rabbitmq.conf\n              readOnly: true\n{{- if .Values.conf.rabbit_advanced_config.enabled }}\n            - name: rabbitmq-etc\n              
mountPath: /etc/rabbitmq/advanced.config\n              subPath: advanced.config\n              readOnly: true\n{{- end }}\n            - name: rabbitmq-etc\n              mountPath: /etc/rabbitmq/rabbitmq-env.conf\n              subPath: rabbitmq-env.conf\n              readOnly: true\n{{- if .Values.manifests.config_ipv6 }}\n            - name: rabbitmq-etc\n              mountPath: /etc/rabbitmq/erl_inetrc\n              subPath: erl_inetrc\n              readOnly: true\n{{- end }}\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_messaging.server.internal \"path\" \"/etc/rabbitmq/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: rabbitmq-bin\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.deployment_name \"rabbitmq-bin\" | quote }}\n            defaultMode: 0555\n        - name: rabbitmq-etc\n          configMap:\n            name: {{ printf \"%s-%s\" $envAll.deployment_name \"rabbitmq-etc\" | quote }}\n            defaultMode: 0444\n        - name: rabbitmq-erlang-cookie\n          secret:\n            secretName: {{ printf \"%s-%s\" $envAll.deployment_name \"erlang-cookie\" | quote }}\n            defaultMode: 0444\n{{ dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $envAll.Values.secrets.tls.oslo_messaging.server.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n        {{- if not $envAll.Values.volume.enabled }}\n        - name: rabbitmq-data\n        {{- if .Values.volume.use_local_path.enabled }}\n          hostPath:\n            path:  {{ .Values.volume.use_local_path.host_path }}\n            type: DirectoryOrCreate\n        {{- else }}\n          emptyDir: {}\n        {{- end }}\n        {{- end }}\n{{- if $envAll.Values.volume.enabled }}\n  volumeClaimTemplates:\n    - metadata:\n        name: rabbitmq-data\n      spec:\n        
accessModes: [\"ReadWriteOnce\"]\n        resources:\n          requests:\n            storage: {{ $envAll.Values.volume.size }}\n        {{- if ne .Values.volume.class_name \"default\" }}\n        storageClassName: {{ $envAll.Values.volume.class_name }}\n        {{- end }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "rabbitmq/templates/utils/_to_rabbit_config.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"rabbitmq.utils.to_rabbit_config\" -}}\n{{- range $top_key, $top_value :=  . }}\n{{- if kindIs \"map\" $top_value -}}\n{{- range $second_key, $second_value :=  . }}\n{{- if kindIs \"map\" $second_value -}}\n{{- range $third_key, $third_value :=  . }}\n{{- if kindIs \"map\" $third_value -}}\n{{ $top_key }}.{{ $second_key }}.{{ $third_key }} = wow\n{{ else -}}\n{{ $top_key }}.{{ $second_key }}.{{ $third_key }} = {{ $third_value }}\n{{ end -}}\n{{- end -}}\n{{ else -}}\n{{ $top_key }}.{{ $second_key }} = {{ $second_value }}\n{{ end -}}\n{{- end -}}\n{{ else -}}\n{{ $top_key }} = {{ $top_value }}\n{{ end -}}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "rabbitmq/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for rabbitmq.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nlabels:\n  server:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  jobs:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    rabbitmq_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbitmq: quay.io/airshipit/rabbitmq:3.10.18\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    scripted_test: quay.io/airshipit/rabbitmq:3.10.18-management\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\n# forceBoot: executes 'rabbitmqctl force_boot' to force boot on\n# cluster shut down unexpectedly in an unknown order.\n# ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot\nforceBoot:\n  enabled: false\n\npod:\n  probes:\n    rabbitmq:\n      rabbitmq:\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 10\n            periodSeconds: 10\n            timeoutSeconds: 10\n            successThreshold: 1\n            failureThreshold: 3\n        liveness:\n    
      enabled: true\n          params:\n            initialDelaySeconds: 60\n            periodSeconds: 10\n            timeoutSeconds: 10\n            successThreshold: 1\n            failureThreshold: 5\n  security_context:\n    server:\n      pod:\n        runAsUser: 999\n      container:\n        rabbitmq_password:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        rabbitmq_cookie:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        rabbitmq_perms:\n          runAsUser: 0\n          readOnlyRootFilesystem: true\n        rabbitmq:\n          allowPrivilegeEscalation: false\n          runAsUser: 999\n          readOnlyRootFilesystem: false\n    cluster_wait:\n      pod:\n        runAsUser: 999\n      container:\n        rabbitmq_cluster_wait:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n        rabbitmq_cookie:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n    test:\n      pod:\n        runAsUser: 999\n      container:\n        rabbitmq_test:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    rabbitmq:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  replicas:\n    server: 2\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      mariadb:\n        min_available: 0\n  resources:\n    enabled: false\n    server:\n      
limits:\n        memory: \"128Mi\"\n        cpu: \"500m\"\n      requests:\n        memory: \"128Mi\"\n        cpu: \"500m\"\n    jobs:\n      tests:\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nconf:\n  enabled_plugins:\n    - rabbitmq_management\n    - rabbitmq_peer_discovery_k8s\n    - rabbitmq_prometheus\n  # This IP could be IPv4/IPv6 and the tcp port will be appended to it and eventually it is set to rabbitmq.listeners.tcp.1\n  bind_address: \"::\"\n  rabbitmq:\n    listeners:\n      tcp:\n        # NOTE(portdirect): This is always defined via the endpoints section.\n        1: null\n    cluster_formation:\n      peer_discovery_backend: rabbit_peer_discovery_k8s\n      k8s:\n        address_type: hostname\n      node_cleanup:\n        interval: \"10\"\n        only_log_warning: \"true\"\n    cluster_partition_handling: autoheal\n    queue_master_locator: min-masters\n    loopback_users.guest: \"false\"\n    management.load_definitions: \"/var/lib/rabbitmq/definitions.json\"\n  rabbit_additonal_conf:\n    # This configuration is used for non TLS deployments\n    management.listener.ip: \"::\"\n    management.listener.port: null\n  rabbit_advanced_config:\n    enabled: false\n    default_consumer_prefetch: 250\n  # Feature Flags is introduced in RabbitMQ 3.8.0\n  # To deploy with standard list of feature, leave as default\n  # To deploy with specific feature, separate each feature with comma\n  # To deploy with all features disabled, leave blank or empty\n  feature_flags: default\n  users: {}\n  # define users in the section below which have to be\n  # created by rabbitmq at start up stage through definitions.json\n  # file and enable job_users_create manifest.\n  #  users:\n  #    
keystone_service:\n  #      auth:\n  #        keystone_username:\n  #          username: keystone\n  #          password: password\n  #      path: /keystone\n  aux_conf: {}\n  # aux_conf can be used to pass additional options to definitions.json, allowed keys are:\n  #           - policies\n  #           - bindings\n  #           - parameters\n  #           - queues\n  #           - exchanges\n  #          vhosts, users and permissions are created in users section of values.\n  #  aux_conf:\n  #    policies:\n  #      - vhost: \"keystone\"\n  #        name: \"ha_ttl_keystone\"\n  #        definition:\n  #          #mirror messages to other nodes in rmq cluster\n  #          ha-mode: \"all\"\n  #          ha-sync-mode: \"automatic\"\n  #          #70s\n  #          message-ttl: 70000\n  #        priority: 0\n  #        apply-to: all\n  #        pattern: '^(?!amq\\.).*'\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - rabbitmq-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    rabbitmq:\n      jobs: null\n    tests:\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n      # NOTE (portdirect): this key is somewhat special, if set to the string\n      # `cluster_wait` then the job dep will be populated with a single value\n      # containing the generated name for the `cluster_wait` job name.\n      jobs: cluster_wait\n    cluster_wait:\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nmonitoring:\n  prometheus:\n    enabled: true\n    rabbitmq:\n      scrape: true\n\nnetwork:\n  host_namespace: false\n  management:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        
nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n\nsecrets:\n  oci_image_registry:\n    rabbitmq: rabbitmq-oci-image-registry-key\n  tls:\n    oslo_messaging:\n      server:\n        internal: rabbitmq-tls-direct\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      rabbitmq:\n        username: rabbitmq\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  monitoring:\n    name: prometheus\n    namespace: null\n    hosts:\n      default: prom-metrics\n      public: prometheus\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 9090\n        public: 80\n  oslo_messaging:\n    auth:\n      erlang_cookie: openstack-cookie\n      user:\n        username: rabbitmq\n        password: password\n      guest:\n        password: password\n    hosts:\n      default: rabbitmq\n      # NOTE(portdirect): the public host is only used to the management WUI\n      # If left empty, the release name sha suffixed with mgr, will be used to\n      # produce an unique hostname.\n      public: null\n    host_fqdn_override:\n      default: null\n    path: /\n    scheme: rabbit\n    port:\n      clustering:\n        # NOTE(portdirect): the value for this port is driven by amqp+20000\n        # it should not be set manually.\n        default: 
null\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n        public: 80\n      metrics:\n        default: 15692\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns_tcp:\n        default: 53\n      dns:\n        default: 53\n        protocol: UDP\n\nnetwork_policy:\n  rabbitmq:\n    ingress:\n      - {}\n    egress:\n      - {}\n\nvolume:\n  use_local_path:\n    enabled: false\n    host_path: /var/lib/rabbitmq\n  chown_on_start: true\n  enabled: true\n  class_name: general\n  size: 768Mi\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  config_ipv6: false\n  ingress_management: true\n  job_cluster_wait: true\n  job_image_repo_sync: true\n  monitoring:\n    prometheus:\n      configmap_bin: false\n  network_policy: false\n  pod_test: true\n  secret_admin_user: true\n  secret_erlang_cookie: true\n  secret_registry: true\n  service_discovery: true\n  service_ingress_management: true\n  service: true\n  statefulset: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #  
     type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "rally/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm rally\nname: rally\nversion: 2025.2.0\nhome: https://docs.openstack.org/developer/rally\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/rally/OpenStack_Project_rally_vertical.png\nsources:\n  - https://opendev.org/openstack/rally\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "rally/README.rst",
    "content": "=====\nRally\n=====\n\nThis chart provides a benchmarking tool for OpenStack services that\nallows us to test our cloud at scale. This chart leverages the Kolla\nimage for Rally and includes a templated configuration file that\nallows configuration overrides similar to other charts in OpenStack-Helm.\nYou can choose which services to benchmark by changing the services\nlisted in the ``values.yaml`` file under the ``enabled_tests`` key.\n\nInstallation\n------------\n\nThis chart can be deployed by running the following command:\n\n::\n\n    helm install --name=rally ./rally --namespace=openstack\n\n\nThis will install Rally into your cluster appropriately. When you run\nthis install command, the chart will bring up a few jobs that will\ncomplete the benchmarking of the OpenStack services that you have\nspecified.\n"
  },
  {
    "path": "rally/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "rally/templates/bin/_manage-db.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nfunction create_or_update_db () {\n  revisionResults=$(rally db revision)\n  if [ $revisionResults = \"None\"  ]\n  then\n    rally db create\n  else\n    rally db upgrade\n  fi\n}\n\ncreate_or_update_db\n"
  },
  {
    "path": "rally/templates/bin/_run-task.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n\n: ${RALLY_ENV_NAME:=\"openstack-helm\"}\n\nfunction run_rally () {\n  CURRENT_TEST=$1\n  rally deployment use ${RALLY_ENV_NAME}\n  rally deployment check\n  rally task validate /tasks/rally/${CURRENT_TEST}.yaml\n  rally task start /tasks/rally/${CURRENT_TEST}.yaml\n  rally task list\n  rally task report --out /var/lib/rally/data/${CURRENT_TEST}.html\n  rally task sla-check\n\n}\n\nfunction create_deployment () {\n  listResults=$(rally deployment list)\n\n  if [ $(echo $listResults | awk '{print $1;}') = \"There\"  ]\n  then\n    rally deployment create --fromenv --name ${RALLY_ENV_NAME}\n  fi\n}\n\ncreate_deployment\n\nIFS=','; for TEST in $ENABLED_TESTS; do\n  run_rally $TEST\ndone\n\nexit 0\n"
  },
  {
    "path": "rally/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: rally-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  manage-db.sh: |\n{{ tuple \"bin/_manage-db.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  run-task.sh: |\n{{ tuple \"bin/_run-task.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n{{- if empty .Values.conf.rally.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.rally.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n{{- if empty .Values.conf.rally.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.rally.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.rally.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.rally.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.rally.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.rally.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.rally.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.rally.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.rally.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.rally.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.rally.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.rally.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.rally.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.rally.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.rally.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.rally.keystone_authtoken \"username\" .Values.endpoints.identity.auth.rally.username -}}\n{{- end -}}\n{{- if empty .Values.conf.rally.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.rally.keystone_authtoken \"password\" .Values.endpoints.identity.auth.rally.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.rally.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . 
| include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.rally.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.rally.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.rally.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.rally.database.connection)) (empty .Values.conf.rally.database.connection) -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"rally\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | set .Values.conf.rally.database \"connection\" -}}\n{{- end -}}\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: rally-etc\ntype: Opaque\ndata:\n  rally.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.rally | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/configmap-tasks.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_tasks }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: rally-tasks\ndata:\n  authenticate.yaml: |\n{{ toYaml .Values.conf.rally_tasks.authenticate_task | indent 4 }}\n  ceilometer.yaml: |\n{{ toYaml .Values.conf.rally_tasks.ceilometer_task | indent 4 }}\n  cinder.yaml: |\n{{ toYaml .Values.conf.rally_tasks.cinder_task | indent 4 }}\n  glance.yaml: |\n{{ toYaml .Values.conf.rally_tasks.glance_task | indent 4 }}\n  heat.yaml: |\n{{ toYaml .Values.conf.rally_tasks.heat_task | indent 4 }}\n  keystone.yaml: |\n{{ toYaml .Values.conf.rally_tasks.keystone_task | indent 4 }}\n  magnum.yaml: |\n{{ toYaml .Values.conf.rally_tasks.magnum_task | indent 4 }}\n  neutron.yaml: |\n{{ toYaml .Values.conf.rally_tasks.neutron_task | indent 4 }}\n  nova.yaml: |\n{{ toYaml .Values.conf.rally_tasks.nova_task | indent 4 }}\n  swift.yaml: |\n{{ toYaml .Values.conf.rally_tasks.swift_task | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/configmap-test-templates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_test_templates }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: heat-tasks-test-templates\ndata:\n{{- range $key, $value := $envAll.Values.conf.rally_tasks.heat_tests }}\n{{- $file := printf \"%s.%s\" (replace \"_\" \"-\"  $key) \"yaml\" }}\n{{- include \"helm-toolkit.snippets.values_template_renderer\" (dict \"envAll\" $envAll \"template\" (index $envAll.Values.conf.rally_tasks.heat_tests $key ) \"key\" $file ) | indent 2 }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "rally/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_bootstrap }}\n{{- $envAll := . }}\n{{- if .Values.bootstrap.enabled }}\n\n{{- $mounts_rally_bootstrap := .Values.pod.mounts.rally_bootstrap.rally_bootstrap }}\n{{- $mounts_rally_bootstrap_init := .Values.pod.mounts.rally_bootstrap.init_container }}\n\n{{- $serviceAccountName := \"rally-bootstrap\" }}\n{{ tuple $envAll \"bootstrap\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n\n{{- $tlsSecret := \"\" -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $tlsSecret = .Values.secrets.tls.identity.api.internal | default \"\" -}}\n{{- end -}}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: rally-bootstrap\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"rally\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"bootstrap\" $mounts_rally_bootstrap_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: rally-bootstrap\n{{ tuple 
$envAll \"bootstrap\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" (ne $tlsSecret \"\") }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          command:\n            - /tmp/bootstrap.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: rally-bin\n              mountPath: /tmp/bootstrap.sh\n              subPath: bootstrap.sh\n              readOnly: true\n{{ dict \"enabled\" (ne $tlsSecret \"\") \"name\" $tlsSecret | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_rally_bootstrap.volumeMounts }}{{ toYaml $mounts_rally_bootstrap.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: rally-bin\n          configMap:\n            name: rally-bin\n            defaultMode: 0555\n{{- dict \"enabled\" (ne $tlsSecret \"\") \"name\" $tlsSecret | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_rally_bootstrap.volumes }}{{ toYaml $mounts_rally_bootstrap.volumes | indent 8 }}{{ end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"rally\" -}}\n\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"rally\" -}}\n\n{{- $_ := set $imageRepoSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) }}\n\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"rally-ks-endpoints\" }}\n{{ tuple $envAll \"ks_endpoints\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\n{{- $tlsSecret := \"\" -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $tlsSecret = .Values.secrets.tls.identity.api.internal | default \"\" -}}\n{{- end }}\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: rally-ks-endpoints\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n    helm.sh/hook: post-install,post-upgrade\n    helm.sh/hook-weight: \"-1\"\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"rally\" \"ks-endpoints\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"ks_endpoints\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n{{- range $key1, $osServiceType := tuple \"benchmark\" }}\n{{- range $key2, $osServiceEndPoint := tuple \"admin\" \"internal\" \"public\" }}\n        - name: {{ $osServiceType 
}}-ks-endpoints-{{ $osServiceEndPoint }}\n{{ tuple $envAll \"ks_endpoints\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_endpoints | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/ks-endpoints.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ks-endpoints-sh\n              mountPath: /tmp/ks-endpoints.sh\n              subPath: ks-endpoints.sh\n              readOnly: true\n{{ dict \"enabled\" (ne $tlsSecret \"\") \"name\" $tlsSecret | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.admin \"useCA\" (ne $tlsSecret \"\") }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n            - name: OS_SVC_ENDPOINT\n              value: {{ $osServiceEndPoint }}\n            - name: OS_SERVICE_NAME\n              value: {{ tuple $osServiceType $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_name_lookup\" }}\n            - name: OS_SERVICE_TYPE\n              value: {{ $osServiceType }}\n            - name: OS_SERVICE_ENDPOINT\n              value: {{ tuple $osServiceType $osServiceEndPoint \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\n{{- end }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: ks-endpoints-sh\n          configMap:\n            name: rally-bin\n            defaultMode: 0555\n{{- dict \"enabled\" (ne $tlsSecret \"\") \"name\" $tlsSecret | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"rally-ks-service\" }}\n{{ tuple $envAll \"ks_service\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\n{{- $tlsSecret := \"\" -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $tlsSecret = .Values.secrets.tls.identity.api.internal | default \"\" -}}\n{{- end }}\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: rally-ks-service\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n    helm.sh/hook: post-install,post-upgrade\n    helm.sh/hook-weight: \"-2\"\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"rally\" \"ks-service\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"ks_service\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n{{- range $key1, $osServiceType := tuple \"benchmark\" }}\n        - name: {{ $osServiceType }}-ks-service-registration\n{{ tuple $envAll \"ks_service\" | include \"helm-toolkit.snippets.image\" | indent 
10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_service | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/ks-service.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: ks-service-sh\n              mountPath: /tmp/ks-service.sh\n              subPath: ks-service.sh\n              readOnly: true\n{{ dict \"enabled\" (ne $tlsSecret \"\") \"name\" $tlsSecret | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" $envAll.Values.secrets.identity.admin \"useCA\" (ne $tlsSecret \"\") }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n            - name: OS_SERVICE_NAME\n              value: {{ tuple $osServiceType $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_name_lookup\" }}\n            - name: OS_SERVICE_TYPE\n              value: {{ $osServiceType }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: ks-service-sh\n          configMap:\n            name: rally-bin\n            defaultMode: 0555\n{{- dict \"enabled\" (ne $tlsSecret \"\") \"name\" $tlsSecret | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"rally\" -}}\n\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.identity.api.internal -}}\n{{- end -}}\n\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) }}\n\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/job-manage-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_manage_db }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"rally-manage-db\" }}\n{{ tuple $envAll \"manage_db\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: rally-manage-db\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n    helm.sh/hook: post-install,post-upgrade\n    helm.sh/hook-weight: \"2\"\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"rally\" \"manage-db\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"manage_db\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: rally-manage-db\n{{ tuple $envAll \"rally_db_sync\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.manage_db | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/manage-db.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: 
/tmp\n            - name: rally-bin\n              mountPath: /tmp/manage-db.sh\n              subPath: manage-db.sh\n              readOnly: true\n            - name: etcrally\n              mountPath: /etc/rally\n            - name: rally-etc\n              mountPath: /etc/rally/rally.conf\n              subPath: rally.conf\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: etcrally\n          emptyDir: {}\n        - name: rally-etc\n          secret:\n            secretName: rally-etc\n            defaultMode: 0444\n        - name: rally-bin\n          configMap:\n            name: rally-bin\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "rally/templates/job-run-task.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_run_task }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"rally-run-task\" }}\n{{ tuple $envAll \"run_task\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: rally-run-task\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n    helm.sh/hook: post-install,post-upgrade\n    helm.sh/hook-weight: \"3\"\nspec:\n  backoffLimit: {{ .Values.jobs.run_tasks.backoffLimit }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"rally\" \"run-task\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: {{ .Values.jobs.run_tasks.restartPolicy }}\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"run_task\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: rally-run-task-init\n{{ tuple $envAll \"run_task\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.run_task | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            runAsUser: 0\n          
command:\n            - chown\n            - -R\n            - \"rally:\"\n            - /var/lib/rally/data\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: rally-reports\n              mountPath: /var/lib/rally/data\n      containers:\n        - name: rally-run-task\n{{ tuple $envAll \"run_task\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.run_task | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/run-task.sh\n          env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/rally/certs/ca.crt\"\n{{- end }}\n            - name: ENABLED_TESTS\n              value: {{ include \"helm-toolkit.utils.joinListWithComma\" .Values.enabled_tests }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: rally-bin\n              mountPath: /tmp/run-task.sh\n              subPath: run-task.sh\n              readOnly: true\n            - name: etcrally\n              mountPath: /etc/rally\n            - name: rally-etc\n              mountPath: /etc/rally/rally.conf\n              subPath: rally.conf\n              readOnly: true\n            - name: rally-tasks\n              mountPath: /tasks/rally\n              readOnly: true\n            - name: heat-tasks-test-templates\n              mountPath: /tmp/tasks/test-templates\n              readOnly: true\n            - name: rally-reports\n              mountPath: /var/lib/rally/data\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.identity.api.internal \"path\" \"/etc/rally/certs\" | 
include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: etcrally\n          emptyDir: {}\n        - name: rally-etc\n          secret:\n            secretName: rally-etc\n            defaultMode: 0444\n        - name: rally-tasks\n          configMap:\n            name: rally-tasks\n            defaultMode: 0444\n        - name: rally-bin\n          configMap:\n            name: rally-bin\n            defaultMode: 0555\n        - name: heat-tasks-test-templates\n          configMap:\n            name: heat-tasks-test-templates\n        - name: rally-reports\n          persistentVolumeClaim:\n            claimName: {{ .Values.pvc.name }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.identity.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: rally-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.rally.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"rally\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/pvc-rally.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pvc_rally }}\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: {{ .Values.pvc.name }}\nspec:\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: {{ .Values.pvc.requests.storage }}\n  storageClassName: {{ .Values.pvc.storage_class }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"rally\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  DB_CONNECTION: {{ tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"rally\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "rally/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "rally/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for rally.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nrelease_group: null\n\nlabels:\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    bootstrap: docker.io/xrally/xrally-openstack:2.0.0\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    db_init: docker.io/xrally/xrally-openstack:2.0.0\n    rally_db_sync: docker.io/xrally/xrally-openstack:2.0.0\n    run_task: docker.io/xrally/xrally-openstack:2.0.0\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nenabled_tasks:\n  # NOTE(alraddarla): not enabled\n  # - ceilometerNOTE(alraddarla): not enabled\n  - cinder\n  - glance\n  - heat\n  - keystone\n  - magnum\n  # NOTE(alraddarla): need a network setup in the gate to fully test\n  # - neutron\n  - nova\n  # NOTE(alraddarla): not enabled\n  # - swift\n\npod:\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        
default: kubernetes.io/hostname\n      weight:\n        default: 10\n  mounts:\n    rally_api:\n      init_container: null\n      rally_api:\n        volumeMounts:\n        volumes:\n    rally_bootstrap:\n      init_container: null\n      rally_bootstrap:\n        volumeMounts:\n        volumes:\n  resources:\n    enabled: false\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      manage_db:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      run_task:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nbootstrap:\n  enabled: false\n  script: |\n    openstack token issue\n\njobs:\n  run_tasks:\n    backoffLimit: 6\n    restartPolicy: OnFailure\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - rally-image-repo-sync\n        services:\n          - endpoint: node\n            service: 
local_image_registry\n  static:\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    ks_endpoints:\n      jobs:\n        - rally-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    manage_db:\n      jobs:\n        - rally-ks-user\n        - rally-ks-endpoints\n        - rally-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    run_task:\n      jobs:\n        - rally-manage-db\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: rally-keystone-admin\n    rally: rally-keystone-user\n  oslo_db:\n    admin: rally-db-admin\n    rally: rally-db-user\n  oci_image_registry:\n    rally: rally-oci-image-registry\n  tls:\n    identity:\n      api:\n        public: keystone-tls-public\n        internal: keystone-tls-api\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      rally:\n        username: rally\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n     
   default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      rally:\n        role: admin\n        region_name: RegionOne\n        username: rally\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  benchmark:\n    name: rally\n    hosts:\n      default: rally-api\n      public: rally\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v1\n    scheme:\n      default: http\n    port:\n      api:\n        default: 9312\n        public: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n      rally:\n        username: rally\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /rally\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_cache:\n    auth:\n      keystone_authtoken:\n        secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n\npvc:\n  name: pvc-rally\n  requests:\n    storage: 2Gi\n  storage_class: general\n\nconf:\n  rally:\n    keystone_authtoken:\n      auth_type: password\n      auth_version: v3\n    rally_api:\n      bind_port: 9312\n    database:\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. 
when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n  rally_tasks:\n    heat_tests:\n      autoscaling_group:\n        heat_template_version: '2013-05-23'\n        outputs:\n          scaling_url:\n            value:\n              get_attr:\n                - scaling_policy\n                - alarm_url\n        parameters:\n          flavor:\n            constraints:\n              - custom_constraint: nova.flavor\n            default: m1.tiny\n            type: string\n          image:\n            constraints:\n              - custom_constraint: glance.image\n            default: cirros-0.3.4-x86_64-uec\n            type: string\n          max_size:\n            constraints:\n              - range:\n                  min: 1\n            default: 5\n            type: number\n          scaling_adjustment:\n            default: 1\n            type: number\n        resources:\n          asg:\n            properties:\n              desired_capacity: 3\n              max_size:\n                get_param: max_size\n              min_size: 1\n              resource:\n                properties:\n                  flavor:\n                    get_param: flavor\n                  image:\n                    get_param: image\n                type: 'OS::Nova::Server'\n            type: 'OS::Heat::AutoScalingGroup'\n          scaling_policy:\n            properties:\n              adjustment_type: change_in_capacity\n              auto_scaling_group_id:\n                get_resource: asg\n              scaling_adjustment:\n                get_param: scaling_adjustment\n            type: 'OS::Heat::ScalingPolicy'\n      autoscaling_policy:\n        heat_template_version: '2013-05-23'\n        resources:\n          test_group:\n            properties:\n              desired_capacity: 0\n              max_size: 0\n              min_size: 0\n              resource:\n           
     type: 'OS::Heat::RandomString'\n            type: 'OS::Heat::AutoScalingGroup'\n          test_policy:\n            properties:\n              adjustment_type: change_in_capacity\n              auto_scaling_group_id:\n                get_resource: test_group\n              scaling_adjustment: 1\n            type: 'OS::Heat::ScalingPolicy'\n      default:\n        heat_template_version: '2014-10-16'\n      random_strings:\n        description: Test template for rally create-update-delete scenario\n        heat_template_version: '2014-10-16'\n        resources:\n          test_string_one:\n            properties:\n              length: 20\n            type: 'OS::Heat::RandomString'\n          test_string_two:\n            properties:\n              length: 20\n            type: 'OS::Heat::RandomString'\n      resource_group:\n        description: Test template for rally create-update-delete scenario\n        heat_template_version: '2014-10-16'\n        resources:\n          test_group:\n            properties:\n              count: 2\n              resource_def:\n                properties:\n                  length: 20\n                type: 'OS::Heat::RandomString'\n            type: 'OS::Heat::ResourceGroup'\n      resource_group_server_with_volume:\n        description: |\n          Test template that creates a resource group with servers and volumes.\n          The template allows to create a lot of nested stacks with standard configuration:\n          nova instance, cinder volume attached to that instance\n        heat_template_version: '2014-10-16'\n        parameters:\n          instance_availability_zone:\n            default: nova\n            description: The Availability Zone to launch the instance.\n            type: string\n          instance_flavor:\n            default: m1.tiny\n            description: Type of the instance to be created.\n            type: string\n          instance_image:\n            default: cirros-0.3.4-x86_64-uec\n          
  type: string\n          instance_volume_size:\n            constraints:\n              - range:\n                  max: 1024\n                  min: 1\n            default: 1\n            description: Size of volume to attach to instance\n            type: number\n          num_instances:\n            constraints:\n              - range:\n                  min: 1\n            description: number of instances that should be created in resource group\n            type: number\n        resources:\n          group_of_volumes:\n            properties:\n              count:\n                get_param: num_instances\n              resource_def:\n                properties:\n                  availability_zone:\n                    get_param: instance_availability_zone\n                  flavor:\n                    get_param: instance_flavor\n                  image:\n                    get_param: instance_image\n                  volume_size:\n                    get_param: instance_volume_size\n                type: templates/server-with-volume.yaml.template\n            type: 'OS::Heat::ResourceGroup'\n      resource_group_with_constraint:\n        description: Template for testing caching.\n        heat_template_version: '2013-05-23'\n        parameters:\n          count:\n            default: 40\n            type: number\n          delay:\n            default: 0.1\n            type: number\n        resources:\n          rg:\n            properties:\n              count:\n                get_param: count\n              resource_def:\n                properties:\n                  constraint_prop_secs:\n                    get_param: delay\n                type: 'OS::Heat::TestResource'\n            type: 'OS::Heat::ResourceGroup'\n      resource_group_with_outputs:\n        heat_template_version: '2013-05-23'\n        outputs:\n          val1:\n            value:\n              get_attr:\n                - rg\n                - resource.0.output\n          val10:\n 
           value:\n              get_attr:\n                - rg\n                - resource.9.output\n          val2:\n            value:\n              get_attr:\n                - rg\n                - resource.1.output\n          val3:\n            value:\n              get_attr:\n                - rg\n                - resource.2.output\n          val4:\n            value:\n              get_attr:\n                - rg\n                - resource.3.output\n          val5:\n            value:\n              get_attr:\n                - rg\n                - resource.4.output\n          val6:\n            value:\n              get_attr:\n                - rg\n                - resource.5.output\n          val7:\n            value:\n              get_attr:\n                - rg\n                - resource.6.output\n          val8:\n            value:\n              get_attr:\n                - rg\n                - resource.7.output\n          val9:\n            value:\n              get_attr:\n                - rg\n                - resource.8.output\n        parameters:\n          attr_wait_secs:\n            default: 0.5\n            type: number\n        resources:\n          rg:\n            properties:\n              count: 10\n              resource_def:\n                properties:\n                  attr_wait_secs:\n                    get_param: attr_wait_secs\n                type: 'OS::Heat::TestResource'\n            type: 'OS::Heat::ResourceGroup'\n      server_with_ports:\n        heat_template_version: '2013-05-23'\n        parameters:\n          cidr:\n            default: 11.11.11.0/24\n            type: string\n          flavor:\n            default: m1.tiny\n            type: string\n          image:\n            default: cirros-0.3.4-x86_64-uec\n            type: string\n          public_net:\n            default: public\n            type: string\n        resources:\n          port_security_group:\n            properties:\n              
description: |\n                Default security group assigned to port. The neutron default group\n                is not used because neutron creates several groups with the same name=default\n                and nova cannot chooses which one should it use.\n              name: default_port_security_group\n            type: 'OS::Neutron::SecurityGroup'\n          private_net:\n            type: 'OS::Neutron::Net'\n          private_subnet:\n            properties:\n              cidr:\n                get_param: cidr\n              network:\n                get_resource: private_net\n            type: 'OS::Neutron::Subnet'\n          router:\n            properties:\n              external_gateway_info:\n                network:\n                  get_param: public_net\n            type: 'OS::Neutron::Router'\n          router_interface:\n            properties:\n              router_id:\n                get_resource: router\n              subnet_id:\n                get_resource: private_subnet\n            type: 'OS::Neutron::RouterInterface'\n          server:\n            properties:\n              flavor:\n                get_param: flavor\n              image:\n                get_param: image\n              networks:\n                - port:\n                    get_resource: server_port\n            type: 'OS::Nova::Server'\n          server_port:\n            properties:\n              fixed_ips:\n                - subnet:\n                    get_resource: private_subnet\n              network:\n                get_resource: private_net\n              security_groups:\n                - get_resource: port_security_group\n            type: 'OS::Neutron::Port'\n      server_with_volume:\n        heat_template_version: '2013-05-23'\n        parameters:\n          availability_zone:\n            default: nova\n            description: The Availability Zone to launch the instance.\n            type: string\n          flavor:\n            default: m1.tiny\n   
         type: string\n          image:\n            default: cirros-0.3.4-x86_64-uec\n            type: string\n          volume_size:\n            constraints:\n              - description: must be between 1 and 1024 Gb.\n                range:\n                  max: 1024\n                  min: 1\n            default: 1\n            description: Size of the volume to be created.\n            type: number\n        resources:\n          cinder_volume:\n            properties:\n              availability_zone:\n                get_param: availability_zone\n              size:\n                get_param: volume_size\n            type: 'OS::Cinder::Volume'\n          server:\n            properties:\n              flavor:\n                get_param: flavor\n              image:\n                get_param: image\n            type: 'OS::Nova::Server'\n          volume_attachment:\n            properties:\n              instance_uuid:\n                get_resource: server\n              mountpoint: /dev/vdc\n              volume_id:\n                get_resource: cinder_volume\n            type: 'OS::Cinder::VolumeAttachment'\n      updated_random_strings_add:\n        description: |\n          Test template for create-update-delete-stack scenario in rally. The\n          template updates the stack defined by random-strings.yaml.template with additional\n          resource.\n        heat_template_version: '2014-10-16'\n        resources:\n          test_string_one:\n            properties:\n              length: 20\n            type: 'OS::Heat::RandomString'\n          test_string_three:\n            properties:\n              length: 20\n            type: 'OS::Heat::RandomString'\n          test_string_two:\n            properties:\n              length: 20\n            type: 'OS::Heat::RandomString'\n      updated_random_strings_delete:\n        description: |\n          Test template for create-update-delete-stack scenario in rally. 
The\n          template deletes one resource from the stack defined by random-strings.yaml.template.\n        heat_template_version: '2014-10-16'\n        resources:\n          test_string_one:\n            properties:\n              length: 20\n            type: 'OS::Heat::RandomString'\n      updated_random_strings_replace:\n        description: |\n          Test template for create-update-delete-stack scenario in rally. The\n          template deletes one resource from the stack defined by random-strings.yaml.template\n          and re-creates it with the updated parameters (so-called update-replace). That happens\n          because some parameters cannot be changed without resource re-creation. The template\n          allows to measure performance of update-replace operation.\n        heat_template_version: '2014-10-16'\n        resources:\n          test_string_one:\n            properties:\n              length: 20\n            type: 'OS::Heat::RandomString'\n          test_string_two:\n            properties:\n              length: 40\n            type: 'OS::Heat::RandomString'\n      updated_resource_group_increase:\n        description: |\n          Test template for create-update-delete-stack scenario in rally. 
The\n          template updates one resource from the stack defined by resource-group.yaml.template\n          and adds children resources to that resource.\n        heat_template_version: '2014-10-16'\n        resources:\n          test_group:\n            properties:\n              count: 3\n              resource_def:\n                properties:\n                  length: 20\n                type: 'OS::Heat::RandomString'\n            type: 'OS::Heat::ResourceGroup'\n      updated_resource_group_reduce:\n        description: |\n          Test template for create-update-delete-stack scenario in rally.\n          The template updates one resource from the stack defined by resource-group.yaml.template\n          and deletes children resources from that resource.\n        heat_template_version: '2014-10-16'\n        resources:\n          test_group:\n            properties:\n              count: 1\n              resource_def:\n                properties:\n                  length: 20\n                type: 'OS::Heat::RandomString'\n            type: 'OS::Heat::ResourceGroup'\n    authenticate_task:\n      Authenticate.keystone:\n        -\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 50\n          runner:\n            concurrency: 5\n            times: 100\n            type: constant\n      # NOTE(alraddarla): not enabled yet\n      # Authenticate.validate_ceilometer:\n      #   -\n      #     args:\n      #       repetitions: 2\n      #     context:\n      #       users:\n      #         tenants: 3\n      #         users_per_tenant: 5\n      #     runner:\n      #       concurrency: 5\n      #       times: 10\n      #       type: constant\n      Authenticate.validate_cinder:\n        -\n          args:\n            repetitions: 2\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 5\n          runner:\n            concurrency: 5\n            times: 10\n            
type: constant\n      Authenticate.validate_glance:\n        -\n          args:\n            repetitions: 2\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 5\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      Authenticate.validate_heat:\n        -\n          args:\n            repetitions: 2\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 5\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      Authenticate.validate_neutron:\n        -\n          args:\n            repetitions: 2\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 5\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      Authenticate.validate_nova:\n        -\n          args:\n            repetitions: 2\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 5\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n    ceilometer_task:\n      CeilometerAlarms.create_alarm:\n        -\n          args:\n            alarm_actions:\n              - \"http://localhost:8776/alarm\"\n            insufficient_data_actions:\n              - \"http://localhost:8776/notok\"\n            meter_name: ram_util\n            ok_actions:\n              - \"http://localhost:8776/ok\"\n            statistic: avg\n            threshold: 10.0\n            type: threshold\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n      CeilometerAlarms.create_alarm_and_get_history:\n        -\n          args:\n            alarm_actions:\n              - \"http://localhost:8776/alarm\"\n            
insufficient_data_actions:\n              - \"http://localhost:8776/notok\"\n            meter_name: ram_util\n            ok_actions:\n              - \"http://localhost:8776/ok\"\n            state: ok\n            statistic: avg\n            threshold: 10.0\n            type: threshold\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      CeilometerAlarms.create_and_delete_alarm:\n        -\n          args:\n            alarm_actions:\n              - \"http://localhost:8776/alarm\"\n            insufficient_data_actions:\n              - \"http://localhost:8776/notok\"\n            meter_name: ram_util\n            ok_actions:\n              - \"http://localhost:8776/ok\"\n            statistic: avg\n            threshold: 10.0\n            type: threshold\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n      CeilometerAlarms.create_and_get_alarm:\n        -\n          args:\n            alarm_actions:\n              - \"http://localhost:8776/alarm\"\n            insufficient_data_actions:\n              - \"http://localhost:8776/notok\"\n            meter_name: ram_util\n            ok_actions:\n              - \"http://localhost:8776/ok\"\n            statistic: avg\n            threshold: 10.0\n            type: threshold\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      CeilometerAlarms.create_and_list_alarm:\n        -\n          args:\n            alarm_actions:\n              - \"http://localhost:8776/alarm\"\n            
insufficient_data_actions:\n              - \"http://localhost:8776/notok\"\n            meter_name: ram_util\n            ok_actions:\n              - \"http://localhost:8776/ok\"\n            statistic: avg\n            threshold: 10.0\n            type: threshold\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n      CeilometerAlarms.create_and_update_alarm:\n        -\n          args:\n            alarm_actions:\n              - \"http://localhost:8776/alarm\"\n            insufficient_data_actions:\n              - \"http://localhost:8776/notok\"\n            meter_name: ram_util\n            ok_actions:\n              - \"http://localhost:8776/ok\"\n            statistic: avg\n            threshold: 10.0\n            type: threshold\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n      CeilometerAlarms.list_alarms:\n        -\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n      CeilometerEvents.create_user_and_get_event:\n        -\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 10\n            times: 10\n            type: constant\n      CeilometerEvents.create_user_and_list_event_types:\n        -\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 10\n            times: 10\n            type: constant\n      CeilometerEvents.create_user_and_list_events:\n        -\n          context:\n            users:\n              
tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 10\n            times: 10\n            type: constant\n      CeilometerMeters.list_matched_meters:\n        -\n          args:\n            filter_by_project_id: true\n            filter_by_resource_id: true\n            filter_by_user_id: true\n            limit: 50\n            metadata_query:\n              status: terminated\n          context:\n            ceilometer:\n              counter_name: benchmark_meter\n              counter_type: gauge\n              counter_unit: \"%\"\n              counter_volume: 100\n              metadata_list:\n                -\n                  deleted: \"false\"\n                  name: \"rally benchmark on\"\n                  status: active\n                -\n                  deleted: \"true\"\n                  name: \"rally benchmark off\"\n                  status: terminated\n              resources_per_tenant: 100\n              samples_per_resource: 100\n              timestamp_interval: 10\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n      CeilometerMeters.list_meters:\n        -\n          args:\n            limit: 50\n            metadata_query:\n              status: terminated\n          context:\n            ceilometer:\n              counter_name: benchmark_meter\n              counter_type: gauge\n              counter_unit: \"%\"\n              counter_volume: 100\n              metadata_list:\n                -\n                  deleted: \"false\"\n                  name: \"rally benchmark on\"\n                  status: active\n                -\n                  deleted: \"true\"\n                  name: \"rally benchmark off\"\n                  status: terminated\n              resources_per_tenant: 100\n              samples_per_resource: 100\n              
timestamp_interval: 10\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n      CeilometerQueries.create_and_query_alarm_history:\n        -\n          args:\n            alarm_actions:\n              - \"http://localhost:8776/alarm\"\n            insufficient_data_actions:\n              - \"http://localhost:8776/notok\"\n            limit: ~\n            meter_name: ram_util\n            ok_actions:\n              - \"http://localhost:8776/ok\"\n            orderby: ~\n            statistic: avg\n            threshold: 10.0\n            type: threshold\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      CeilometerQueries.create_and_query_alarms:\n        -\n          args:\n            alarm_actions:\n              - \"http://localhost:8776/alarm\"\n            filter:\n              and:\n                -\n                  ? \"!=\"\n                  :\n                    state: dummy_state\n                -\n                  ? 
\"=\"\n                  :\n                    type: threshold\n            insufficient_data_actions:\n              - \"http://localhost:8776/notok\"\n            limit: 10\n            meter_name: ram_util\n            ok_actions:\n              - \"http://localhost:8776/ok\"\n            orderby: ~\n            statistic: avg\n            threshold: 10.0\n            type: threshold\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      CeilometerQueries.create_and_query_samples:\n        -\n          args:\n            counter_name: cpu_util\n            counter_type: gauge\n            counter_unit: instance\n            counter_volume: 1.0\n            filter:\n              ? \"=\"\n              :\n                counter_unit: instance\n            limit: 10\n            orderby: ~\n            resource_id: resource_id\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      CeilometerResource.get_tenant_resources:\n        -\n          context:\n            ceilometer:\n              counter_name: cpu_util\n              counter_type: gauge\n              counter_unit: instance\n              counter_volume: 1.0\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      CeilometerResource.list_matched_resources:\n        -\n          args:\n            filter_by_project_id: true\n            filter_by_user_id: true\n            limit: 50\n            metadata_query:\n              status: terminated\n          context:\n            ceilometer:\n              counter_name: benchmark_meter\n              counter_type: gauge\n        
      counter_unit: \"%\"\n              counter_volume: 100\n              metadata_list:\n                -\n                  deleted: \"false\"\n                  name: \"rally benchmark on\"\n                  status: active\n                -\n                  deleted: \"true\"\n                  name: \"rally benchmark off\"\n                  status: terminated\n              resources_per_tenant: 100\n              samples_per_resource: 100\n              timestamp_interval: 10\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n      CeilometerResource.list_resources:\n        -\n          args:\n            limit: 50\n            metadata_query:\n              status: terminated\n          context:\n            ceilometer:\n              counter_name: benchmark_meter\n              counter_type: gauge\n              counter_unit: \"%\"\n              counter_volume: 100\n              metadata_list:\n                -\n                  deleted: \"false\"\n                  name: \"rally benchmark on\"\n                  status: active\n                -\n                  deleted: \"true\"\n                  name: \"rally benchmark off\"\n                  status: terminated\n              resources_per_tenant: 100\n              samples_per_resource: 100\n              timestamp_interval: 10\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n      CeilometerSamples.list_matched_samples:\n        -\n          args:\n            filter_by_project_id: true\n            filter_by_resource_id: true\n            filter_by_user_id: true\n            limit: 50\n            metadata_query:\n              status: not_active\n          context:\n            ceilometer:\n              counter_name: cpu_util\n   
           counter_type: gauge\n              counter_unit: instance\n              counter_volume: 1.0\n              metadata_list:\n                -\n                  created_at: \"2015-09-04T12:34:19.000000\"\n                  deleted: \"False\"\n                  name: fake_resource\n                  status: active\n                -\n                  created_at: \"2015-09-10T06:55:12.000000\"\n                  deleted: \"False\"\n                  name: fake_resource_1\n                  status: not_active\n              resources_per_tenant: 100\n              samples_per_resource: 100\n              timestamp_interval: 60\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      CeilometerSamples.list_samples:\n        -\n          args:\n            limit: 50\n            metadata_query:\n              status: not_active\n          context:\n            ceilometer:\n              batch_size: 5\n              counter_name: cpu_util\n              counter_type: gauge\n              counter_unit: instance\n              counter_volume: 1.0\n              metadata_list:\n                -\n                  created_at: \"2015-09-04T12:34:19.000000\"\n                  deleted: \"False\"\n                  name: fake_resource\n                  status: active\n                -\n                  created_at: \"2015-09-10T06:55:12.000000\"\n                  deleted: \"False\"\n                  name: fake_resource_1\n                  status: not_active\n              resources_per_tenant: 100\n              samples_per_resource: 100\n              timestamp_interval: 60\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      CeilometerStats.create_meter_and_get_stats:\n        -\n          args:\n     
       counter_type: cumulative\n            counter_unit: \"\"\n            counter_volume: 1.0\n            resource_id: resource-id\n            user_id: user-id\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 5\n            times: 200\n            type: constant\n      CeilometerStats.get_stats:\n        -\n          args:\n            filter_by_project_id: true\n            filter_by_resource_id: true\n            filter_by_user_id: true\n            groupby: resource_id\n            metadata_query:\n              status: terminated\n            meter_name: benchmark_meter\n            period: 300\n          context:\n            ceilometer:\n              counter_name: benchmark_meter\n              counter_type: gauge\n              counter_unit: \"%\"\n              counter_volume: 100\n              metadata_list:\n                -\n                  deleted: \"false\"\n                  name: \"rally benchmark on\"\n                  status: active\n                -\n                  deleted: \"true\"\n                  name: \"rally benchmark off\"\n                  status: terminated\n              resources_per_tenant: 100\n              samples_per_resource: 100\n              timestamp_interval: 10\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n      CeilometerTraits.create_user_and_list_trait_descriptions:\n        -\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 10\n            times: 10\n            type: constant\n      CeilometerTraits.create_user_and_list_traits:\n        -\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 
10\n            times: 10\n            type: constant\n    cinder_task:\n      CinderVolumeTypes.create_and_delete_volume_type:\n        -\n          args: {}\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 5\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      CinderVolumeTypes.create_and_list_encryption_type:\n        -\n          args:\n            specs:\n              cipher: aes-xts-plain64\n              control_location: front-end\n              key_size: 512\n              provider: LuksEncryptor\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 4\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      CinderVolumeTypes.create_volume_type_and_encryption_type:\n        -\n          args:\n            specs:\n              cipher: aes-xts-plain64\n              control_location: front-end\n              key_size: 512\n              provider: LuksEncryptor\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 5\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      CinderVolumes.create_and_accept_transfer:\n        -\n          args:\n            size: 1\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 3\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      CinderVolumes.create_and_attach_volume:\n        -\n          args:\n            create_volume_params:\n              availability_zone: nova\n            flavor:\n 
             name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n            size: 10\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 5\n            type: constant\n        -\n          args:\n            create_volume_params:\n              availability_zone: nova\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n            size:\n              max: 5\n              min: 1\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 5\n            type: constant\n      CinderVolumes.create_and_delete_snapshot:\n        -\n          args:\n            force: false\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n            volumes:\n              size: 1\n          runner:\n            concurrency: 2\n            times: 3\n            type: constant\n      CinderVolumes.create_and_delete_volume:\n        -\n          args:\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n            size: 1\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n            images:\n   
           image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 2\n            type: constant\n      CinderVolumes.create_and_extend_volume:\n        -\n          args:\n            new_size: 2\n            size: 1\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 2\n            type: constant\n        -\n          args:\n            new_size:\n              max: 10\n              min: 6\n            size:\n              max: 5\n              min: 1\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 2\n            type: constant\n      CinderVolumes.create_and_get_volume:\n        -\n          args:\n            size: 1\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 5\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n        -\n          args:\n            size:\n              max: 5\n              min: 1\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 5\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      CinderVolumes.create_and_list_snapshots:\n        -\n          args:\n            detailed: true\n            force: false\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            volumes:\n       
       size: 1\n          runner:\n            concurrency: 2\n            times: 2\n            type: constant\n      CinderVolumes.create_and_list_volume:\n        -\n          args:\n            detailed: true\n            size: 1\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 3\n            type: constant\n        -\n          args:\n            detailed: true\n            size:\n              max: 5\n              min: 1\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 3\n            type: constant\n      CinderVolumes.create_and_list_volume_backups:\n        -\n          args:\n            create_backup_kwargs: {}\n            create_volume_kwargs: {}\n            detailed: true\n            do_delete: true\n            size: 1\n          context:\n            roles:\n              - member\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 2\n            type: constant\n      CinderVolumes.create_and_restore_volume_backup:\n        -\n          args:\n            create_backup_kwargs: {}\n            create_volume_kwargs: {}\n            do_delete: true\n            size: 1\n          context:\n            roles:\n              - member\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 2\n            type: constant\n      CinderVolumes.create_and_upload_volume_to_image:\n        -\n          args:\n            container_format: bare\n            disk_format: raw\n            do_delete: true\n            force: false\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n            size: 1\n          context:\n      
      users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 3\n            type: constant\n        -\n          args:\n            container_format: bare\n            disk_format: raw\n            do_delete: true\n            force: false\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n            size:\n              max: 5\n              min: 1\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 3\n            type: constant\n      CinderVolumes.create_from_volume_and_delete_volume:\n        -\n          args:\n            size: 1\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            volumes:\n              size: 1\n          runner:\n            concurrency: 2\n            times: 2\n            type: constant\n        -\n          args:\n            size:\n              max: 5\n              min: 1\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            volumes:\n              size: 1\n          runner:\n            concurrency: 2\n            times: 2\n            type: constant\n      CinderVolumes.create_nested_snapshots_and_attach_volume:\n        -\n          args:\n            nested_level: 5\n            size:\n              max: 5\n              min: 1\n          context:\n            servers:\n              flavor:\n                name: m1.tiny\n              image:\n                name: cirros-0.6.2-x86_64-disk.img\n              
servers_per_tenant: 2\n            users:\n              tenants: 2\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n      CinderVolumes.create_snapshot_and_attach_volume:\n        -\n          args:\n            size:\n              max: 5\n              min: 1\n            volume_type: false\n          context:\n            servers:\n              flavor:\n                name: m1.tiny\n              image:\n                name: cirros-0.6.2-x86_64-disk.img\n              servers_per_tenant: 2\n            users:\n              tenants: 2\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 4\n            type: constant\n        -\n          args:\n            size:\n              max: 5\n              min: 1\n            volume_type: true\n          context:\n            servers:\n              flavor:\n                name: m1.tiny\n              image:\n                name: cirros-0.6.2-x86_64-disk.img\n              servers_per_tenant: 2\n            users:\n              tenants: 2\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              
images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 4\n            type: constant\n      CinderVolumes.create_volume_and_clone:\n        -\n          args:\n            size: 1\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 3\n            type: constant\n        -\n          args:\n            nested_level: 3\n            size:\n              max: 5\n              min: 1\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 3\n            type: constant\n      CinderVolumes.create_volume_backup:\n        -\n          args:\n            create_backup_kwargs: {}\n            create_volume_kwargs: {}\n            do_delete: true\n            size: 1\n          context:\n            roles:\n              - member\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 2\n            type: constant\n      CinderVolumes.create_volume_from_snapshot:\n        -\n          args:\n            do_delete: true\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n            volumes:\n              size: 1\n          runner:\n            concurrency: 2\n            times: 3\n            type: constant\n      CinderVolumes.list_transfers:\n        -\n          args:\n            detailed: true\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 3\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      CinderVolumes.list_volumes:\n        -\n          args:\n            detailed: true\n          context:\n            
users:\n              tenants: 1\n              users_per_tenant: 1\n            volumes:\n              size: 1\n              volumes_per_tenant: 4\n          runner:\n            concurrency: 1\n            times: 100\n            type: constant\n      CinderVolumes.modify_volume_metadata:\n        -\n          args: {}\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n            volumes:\n              size: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      CinderVolumeBackups.create_incremental_volume_backup:\n        -\n          args:\n            create_backup_kwargs: {}\n            create_volume_kwargs: {}\n            size: 1\n          context:\n            roles:\n              - admin\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 5\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      CinderVolumes.create_and_update_volume:\n        -\n          args:\n            create_volume_kwargs: {}\n            size: 1\n            update_volume_kwargs:\n              display_description: desc_updated\n              display_name: name_updated\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 3\n            type: constant\n      CinderVolumes.create_volume_and_update_readonly_flag:\n        -\n          args:\n            read_only: true\n            size: 1\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 3\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      CinderVolumes.list_types:\n        -\n          args:\n            
is_public: true\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 3\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n    glance_task:\n      GlanceImages.create_and_delete_image:\n        -\n          args:\n            container_format: bare\n            disk_format: qcow2\n            image_location: \"http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img\"\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 3\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      GlanceImages.create_and_list_image:\n        -\n          args:\n            container_format: bare\n            disk_format: qcow2\n            image_location: \"http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img\"\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n      GlanceImages.create_image_and_boot_instances:\n        -\n          args:\n            container_format: bare\n            disk_format: qcow2\n            flavor:\n              name: m1.tiny\n            image_location: \"http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img\"\n            number_instances: 2\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 5\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      GlanceImages.list_images:\n        -\n          context:\n            images:\n              image_container: bare\n              image_type: qcow2\n              image_url: \"http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img\"\n              images_per_tenant: 
4\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n    heat_task:\n      HeatStacks.create_and_delete_stack:\n        -\n          args:\n            template_path: /tmp/tasks/test-templates/server-with-ports.yaml\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 3\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      HeatStacks.create_and_list_stack:\n        -\n          args:\n            template_path: /tmp/tasks/test-templates/default.yaml\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n      HeatStacks.create_check_delete_stack:\n        -\n          args:\n            template_path: /tmp/tasks/test-templates/random-strings.yaml\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 3\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      HeatStacks.create_snapshot_restore_delete_stack:\n        -\n          args:\n            template_path: /tmp/tasks/test-templates/random-strings.yaml\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      HeatStacks.create_stack_and_list_output:\n        -\n          args:\n            template_path: /tmp/tasks/test-templates/resource-group-with-outputs.yaml\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 5\n            type: constant\n      
HeatStacks.create_stack_and_list_output_via_API:\n        -\n          args:\n            template_path: /tmp/tasks/test-templates/resource-group-with-outputs.yaml\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 5\n            type: constant\n      HeatStacks.create_stack_and_scale:\n        -\n          args:\n            delta: 1\n            output_key: scaling_url\n            template_path: /tmp/tasks/test-templates/autoscaling-group.yaml\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 3\n            type: constant\n      HeatStacks.create_stack_and_show_output:\n        -\n          args:\n            output_key: val1\n            template_path: /tmp/tasks/test-templates/resource-group-with-outputs.yaml\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 5\n            type: constant\n      HeatStacks.create_stack_and_show_output_via_API:\n        -\n          args:\n            output_key: val1\n            template_path: /tmp/tasks/test-templates/resource-group-with-outputs.yaml\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 1\n            times: 5\n            type: constant\n      HeatStacks.create_suspend_resume_delete_stack:\n        -\n          args:\n            template_path: /tmp/tasks/test-templates/random-strings.yaml\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      HeatStacks.create_update_delete_stack:\n        -\n          args:\n            
template_path: /tmp/tasks/test-templates/resource-group.yaml\n            updated_template_path: /tmp/tasks/test-templates/updated-resource-group-reduce.yaml\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 3\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      HeatStacks.list_stacks_and_events:\n        -\n          context:\n            stacks:\n              resources_per_stack: 10\n              stacks_per_tenant: 2\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n      HeatStacks.list_stacks_and_resources:\n        -\n          context:\n            stacks:\n              resources_per_stack: 10\n              stacks_per_tenant: 2\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n    keystone_task:\n      KeystoneBasic.add_and_remove_user_role:\n        -\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      KeystoneBasic.authenticate_user_and_validate_token:\n        -\n          args: {}\n          runner:\n            concurrency: 5\n            times: 20\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_add_and_list_user_roles:\n        -\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      KeystoneBasic.create_and_delete_ec2credential:\n        -\n          context:\n            users:\n              tenants: 2\n              
users_per_tenant: 2\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      KeystoneBasic.create_and_delete_role:\n        -\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      KeystoneBasic.create_and_delete_service:\n        -\n          args:\n            description: test_description\n            service_type: Rally_test_type\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      KeystoneBasic.create_and_get_role:\n        -\n          args: {}\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      KeystoneBasic.create_and_list_ec2credentials:\n        -\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      KeystoneBasic.create_and_list_services:\n        -\n          args:\n            description: test_description\n            service_type: Rally_test_type\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      KeystoneBasic.create_and_list_tenants:\n        -\n          args: {}\n          runner:\n            concurrency: 1\n            times: 10\n            type: constant\n      KeystoneBasic.create_and_list_users:\n        -\n          args: {}\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      KeystoneBasic.create_delete_user:\n        -\n          args: {}\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      KeystoneBasic.create_tenant:\n        -\n          args: {}\n          
runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      KeystoneBasic.create_tenant_with_users:\n        -\n          args:\n            users_per_tenant: 10\n          runner:\n            concurrency: 10\n            times: 10\n            type: constant\n      KeystoneBasic.create_update_and_delete_tenant:\n        -\n          args: {}\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      KeystoneBasic.create_user:\n        -\n          args: {}\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      KeystoneBasic.create_user_set_enabled_and_delete:\n        -\n          args:\n            enabled: true\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n        -\n          args:\n            enabled: false\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      KeystoneBasic.create_user_update_password:\n        -\n          args: {}\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      KeystoneBasic.get_entities:\n        -\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n    magnum_task:\n      MagnumClusterTemplates.list_cluster_templates:\n        -\n          context:\n            cluster_templates:\n              coe: kubernetes\n              dns_nameserver: \"8.8.8.8\"\n              docker_volume_size: 5\n              external_network_id: public\n              flavor_id: m1.small\n              image_id: fedora-atomic-latest\n              network_driver: flannel\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n        -\n          context:\n            cluster_templates:\n       
       coe: swarm\n              dns_nameserver: \"8.8.8.8\"\n              docker_volume_size: 5\n              external_network_id: public\n              flavor_id: m1.small\n              image_id: fedora-atomic-latest\n              network_driver: docker\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n        -\n          context:\n            cluster_templates:\n              coe: mesos\n              dns_nameserver: \"8.8.8.8\"\n              external_network_id: public\n              flavor_id: m1.small\n              image_id: ubuntu-mesos\n              network_driver: docker\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n      MagnumClusters.create_and_list_clusters:\n        -\n          args:\n            node_count: 1\n          context:\n            cluster_templates:\n              coe: kubernetes\n              dns_nameserver: \"8.8.8.8\"\n              docker_volume_size: 5\n              external_network_id: public\n              flavor_id: m1.small\n              image_id: fedora-atomic-latest\n              network_driver: flannel\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n        -\n          args:\n            node_count: 1\n          context:\n            cluster_templates:\n              coe: swarm\n              dns_nameserver: \"8.8.8.8\"\n              docker_volume_size: 5\n              external_network_id: public\n              flavor_id: m1.small\n              image_id: fedora-atomic-latest\n              network_driver: docker\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            
concurrency: 1\n            times: 1\n            type: constant\n        -\n          args:\n            node_count: 1\n          context:\n            cluster_templates:\n              coe: mesos\n              dns_nameserver: \"8.8.8.8\"\n              external_network_id: public\n              flavor_id: m1.small\n              image_id: ubuntu-mesos\n              network_driver: docker\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n      MagnumClusters.list_clusters:\n        -\n          context:\n            cluster_templates:\n              coe: kubernetes\n              dns_nameserver: \"8.8.8.8\"\n              docker_volume_size: 5\n              external_network_id: public\n              flavor_id: m1.small\n              image_id: fedora-atomic-latest\n              network_driver: flannel\n            clusters:\n              node_count: 2\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n        -\n          context:\n            cluster_templates:\n              coe: swarm\n              dns_nameserver: \"8.8.8.8\"\n              docker_volume_size: 5\n              external_network_id: public\n              flavor_id: m1.small\n              image_id: fedora-atomic-latest\n              network_driver: docker\n            clusters:\n              node_count: 2\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n        -\n          context:\n            cluster_templates:\n              coe: mesos\n              dns_nameserver: \"8.8.8.8\"\n              external_network_id: public\n              flavor_id: m1.small\n              image_id: ubuntu-mesos\n             
 network_driver: docker\n            clusters:\n              node_count: 2\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n    neutron_task:\n      NeutronNetworks.create_and_delete_floating_ips:\n        -\n          args:\n            floating_ip_args: {}\n            floating_network: public\n          context:\n            quotas:\n              neutron:\n                floatingip: -1\n            users:\n              tenants: 2\n              users_per_tenant: 3\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      NeutronNetworks.create_and_delete_networks:\n        -\n          args:\n            network_create_args: {}\n          context:\n            quotas:\n              neutron:\n                network: -1\n            users:\n              tenants: 3\n              users_per_tenant: 3\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      NeutronNetworks.create_and_delete_ports:\n        -\n          args:\n            network_create_args: {}\n            port_create_args: {}\n            ports_per_network: 10\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                port: -1\n            users:\n              tenants: 3\n              users_per_tenant: 3\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      NeutronNetworks.create_and_delete_routers:\n        -\n          args:\n            network_create_args: {}\n            router_create_args: {}\n            subnet_cidr_start: 1.1.0.0/30\n            subnet_create_args: {}\n            subnets_per_network: 2\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n           
     router: -1\n                subnet: -1\n            users:\n              tenants: 3\n              users_per_tenant: 3\n          runner:\n            concurrency: 10\n            times: 30\n            type: constant\n      NeutronNetworks.create_and_delete_subnets:\n        -\n          args:\n            network_create_args: {}\n            subnet_cidr_start: 1.1.0.0/30\n            subnet_create_args: {}\n            subnets_per_network: 2\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                subnet: -1\n            users:\n              tenants: 3\n              users_per_tenant: 3\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      NeutronNetworks.create_and_list_floating_ips:\n        -\n          args:\n            floating_ip_args: {}\n            floating_network: public\n          context:\n            quotas:\n              neutron:\n                floatingip: -1\n            users:\n              tenants: 2\n              users_per_tenant: 3\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      NeutronNetworks.create_and_list_networks:\n        -\n          args:\n            network_create_args: {}\n          context:\n            quotas:\n              neutron:\n                network: -1\n            users:\n              tenants: 3\n              users_per_tenant: 3\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n        -\n          args:\n            network_create_args:\n              ? 
\"provider:network_type\"\n              : vxlan\n          context:\n            quotas:\n              neutron:\n                network: -1\n            roles:\n              - admin\n            users:\n              tenants: 3\n              users_per_tenant: 3\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronNetworks.create_and_list_ports:\n        -\n          args:\n            network_create_args: {}\n            port_create_args: {}\n            ports_per_network: 10\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                port: -1\n            users:\n              tenants: 3\n              users_per_tenant: 3\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      NeutronNetworks.create_and_list_routers:\n        -\n          args:\n            network_create_args: {}\n            router_create_args: {}\n            subnet_cidr_start: 1.1.0.0/30\n            subnet_create_args: {}\n            subnets_per_network: 2\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                router: -1\n                subnet: -1\n            users:\n              tenants: 3\n              users_per_tenant: 3\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      NeutronNetworks.create_and_list_subnets:\n        -\n          args:\n            network_create_args: {}\n            subnet_cidr_start: 1.1.0.0/30\n            subnet_create_args: {}\n            subnets_per_network: 2\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                subnet: -1\n            users:\n              tenants: 2\n              
users_per_tenant: 3\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      NeutronNetworks.create_and_show_network:\n        -\n          args:\n            network_create_args: {}\n          context:\n            quotas:\n              neutron:\n                network: -1\n            users:\n              tenants: 3\n              users_per_tenant: 3\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronNetworks.create_and_update_networks:\n        -\n          args:\n            network_create_args: {}\n            network_update_args:\n              admin_state_up: false\n              name: _updated\n          context:\n            quotas:\n              neutron:\n                network: -1\n            users:\n              tenants: 2\n              users_per_tenant: 3\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      NeutronNetworks.create_and_update_ports:\n        -\n          args:\n            network_create_args: {}\n            port_create_args: {}\n            port_update_args:\n              admin_state_up: false\n              device_id: dummy_id\n              device_owner: dummy_owner\n              name: _port_updated\n            ports_per_network: 5\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                port: -1\n            users:\n              tenants: 2\n              users_per_tenant: 3\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      NeutronNetworks.create_and_update_routers:\n        -\n          args:\n            network_create_args: {}\n            router_create_args: {}\n            router_update_args:\n              admin_state_up: false\n              name: 
_router_updated\n            subnet_cidr_start: 1.1.0.0/30\n            subnet_create_args: {}\n            subnets_per_network: 2\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                router: -1\n                subnet: -1\n            users:\n              tenants: 2\n              users_per_tenant: 3\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      NeutronNetworks.create_and_update_subnets:\n        -\n          args:\n            network_create_args: {}\n            subnet_cidr_start: 1.4.0.0/16\n            subnet_create_args: {}\n            subnet_update_args:\n              enable_dhcp: false\n              name: _subnet_updated\n            subnets_per_network: 2\n          context:\n            network: {}\n            quotas:\n              neutron:\n                network: -1\n                subnet: -1\n            users:\n              tenants: 2\n              users_per_tenant: 3\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      NeutronNetworks.list_agents:\n        -\n          args:\n            agent_args: {}\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 3\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NeutronSecurityGroup.create_and_delete_security_groups:\n        -\n          args:\n            security_group_create_args: {}\n          context:\n            quotas:\n              neutron:\n                security_group: -1\n            users:\n              tenants: 3\n              users_per_tenant: 3\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      NeutronSecurityGroup.create_and_list_security_groups:\n        -\n         
 args:\n            security_group_create_args: {}\n          context:\n            quotas:\n              neutron:\n                security_group: -1\n            users:\n              tenants: 3\n              users_per_tenant: 3\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n      NeutronSecurityGroup.create_and_update_security_groups:\n        -\n          args:\n            security_group_create_args: {}\n            security_group_update_args: {}\n          context:\n            quotas:\n              neutron:\n                security_group: -1\n            users:\n              tenants: 3\n              users_per_tenant: 3\n          runner:\n            concurrency: 10\n            times: 100\n            type: constant\n    nova_task:\n      NovaAgents.list_agents:\n        -\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaAggregates.create_aggregate_add_and_remove_host:\n        -\n          args:\n            availability_zone: nova\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaAggregates.create_aggregate_add_host_and_boot_server:\n        -\n          args:\n            availability_zone: nova\n            boot_server_kwargs: {}\n            disk: 1\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n            metadata:\n              test_metadata: \"true\"\n            ram: 512\n            vcpus: 1\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              
image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaAggregates.create_and_delete_aggregate:\n        -\n          args:\n            availability_zone: nova\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaAggregates.create_and_get_aggregate_details:\n        -\n          args:\n            availability_zone: nova\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaAggregates.create_and_list_aggregates:\n        -\n          args:\n            availability_zone: nova\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaAggregates.create_and_update_aggregate:\n        -\n          args:\n            availability_zone: nova\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaAggregates.list_aggregates:\n        -\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaAvailabilityZones.list_availability_zones:\n        -\n        
  args:\n            detailed: true\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaFlavors.create_and_delete_flavor:\n        -\n          args:\n            disk: 1\n            ram: 500\n            vcpus: 1\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaFlavors.create_and_get_flavor:\n        -\n          args:\n            disk: 1\n            ram: 500\n            vcpus: 1\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaFlavors.create_and_list_flavor_access:\n        -\n          args:\n            disk: 1\n            ram: 500\n            vcpus: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaFlavors.create_flavor:\n        -\n          args:\n            disk: 1\n            ram: 500\n            vcpus: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaFlavors.create_flavor_and_add_tenant_access:\n        -\n          args:\n            disk: 1\n            ram: 500\n            vcpus: 1\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaFlavors.create_flavor_and_set_keys:\n        -\n          args:\n            disk: 1\n            extra_specs:\n              ? 
\"quota:disk_read_bytes_sec\"\n              : 10240\n            ram: 500\n            vcpus: 1\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaFlavors.list_flavors:\n        -\n          args:\n            detailed: true\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaHypervisors.list_and_get_hypervisors:\n        -\n          args:\n            detailed: true\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 2\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaHypervisors.list_and_get_uptime_hypervisors:\n        -\n          args:\n            detailed: true\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 2\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaHypervisors.list_and_search_hypervisors:\n        -\n          args:\n            detailed: true\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 2\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaHypervisors.list_hypervisors:\n        -\n          args:\n            detailed: true\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      
NovaHypervisors.statistics_hypervisors:\n        -\n          args: {}\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 2\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaKeypair.boot_and_delete_server_with_keypair:\n        -\n          args:\n            boot_server_kwargs: {}\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            network:\n              start_cidr: 100.1.0.0/26\n            users:\n              tenants: 2\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 5\n            type: constant\n      NovaKeypair.create_and_delete_keypair:\n        -\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaKeypair.create_and_list_keypairs:\n        -\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.boot_and_associate_floating_ip:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            network: {}\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            images:\n              image_url: 
\"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n      NovaServers.boot_and_bounce_server:\n        -\n          args:\n            actions:\n              -\n                hard_reboot: 1\n              -\n                soft_reboot: 1\n              -\n                stop_start: 1\n              -\n                rescue_unrescue: 1\n            flavor:\n              name: m1.tiny\n            force_delete: false\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.boot_and_delete_multiple_servers:\n        -\n          args:\n            count: 5\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n      
NovaServers.boot_and_delete_server:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            force_delete: false\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n        -\n          args:\n            auto_assign_nic: true\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            network:\n              networks_per_tenant: 2\n              start_cidr: 10.2.0.0/24\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.boot_and_get_console_output:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: 
bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n      NovaServers.boot_and_list_server:\n        -\n          args:\n            detailed: true\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n      NovaServers.boot_and_live_migrate_server:\n        -\n          args:\n            block_migration: false\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.boot_and_migrate_server:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: 
cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.boot_and_rebuild_server:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            from_image:\n              name: cirros-0.6.2-x86_64-disk.img\n            to_image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 5\n            type: constant\n      NovaServers.boot_and_show_server:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n      NovaServers.boot_and_update_server:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: 
\"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.boot_lock_unlock_and_delete:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.boot_server:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.boot_server_associate_and_dissociate_floating_ip:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            network: {}\n            users:\n              tenants: 3\n              
users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 5\n            type: constant\n          sla:\n            failure_rate:\n              max: 0\n      NovaServers.boot_server_attach_created_volume_and_live_migrate:\n        -\n          args:\n            block_migration: false\n            boot_server_kwargs: {}\n            create_volume_kwargs: {}\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n            size: 10\n          context:\n            users:\n              tenants: 2\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 5\n            type: constant\n      NovaServers.boot_server_attach_created_volume_and_resize:\n        -\n          args:\n            boot_server_kwargs: {}\n            confirm: true\n            create_volume_kwargs: {}\n            do_delete: true\n            flavor:\n              name: m1.tiny\n            force_delete: false\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n            to_flavor:\n              name: m1.small\n            volume_size: 1\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: 
cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.boot_server_from_volume:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n            volume_size: 10\n            volume_type: \"\"\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.boot_server_from_volume_and_delete:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            force_delete: false\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n            volume_size: 10\n            volume_type: \"\"\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.boot_server_from_volume_and_live_migrate:\n        -\n          args:\n            block_migration: false\n            flavor:\n              name: m1.tiny\n            force_delete: false\n            image:\n              name: 
cirros-0.6.2-x86_64-disk.img\n            volume_size: 10\n            volume_type: \"\"\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.boot_server_from_volume_and_resize:\n        -\n          args:\n            boot_server_kwargs: {}\n            confirm: true\n            create_volume_kwargs: {}\n            do_delete: true\n            flavor:\n              name: m1.tiny\n            force_delete: false\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n            to_flavor:\n              name: m1.small\n            volume_size: 1\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.boot_server_from_volume_snapshot:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n            volume_size: 10\n            volume_type: \"\"\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              
image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.list_servers:\n        -\n          args:\n            detailed: true\n          context:\n            servers:\n              flavor:\n                name: m1.tiny\n              image:\n                name: cirros-0.6.2-x86_64-disk.img\n              servers_per_tenant: 2\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 1\n            times: 1\n            type: constant\n      NovaServers.pause_and_unpause_server:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            force_delete: false\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.resize_server:\n        -\n          args:\n            confirm: true\n            flavor:\n              name: m1.tiny\n            force_delete: false\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n            to_flavor:\n              name: 
m1.small\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 5\n            times: 10\n            type: constant\n      NovaServers.shelve_and_unshelve_server:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            force_delete: false\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.snapshot_server:\n        -\n          args:\n            flavor:\n              name: m1.tiny\n            force_delete: false\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServers.suspend_and_resume_server:\n        -\n          args:\n            flavor:\n          
    name: m1.tiny\n            force_delete: false\n            image:\n              name: cirros-0.6.2-x86_64-disk.img\n          context:\n            users:\n              tenants: 3\n              users_per_tenant: 2\n            images:\n              image_url: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n              image_name: cirros-0.6.2-x86_64-disk.img\n              image_type: qcow2\n              image_container: bare\n              images_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n      NovaServices.list_services:\n        -\n          runner:\n            concurrency: 2\n            times: 10\n            type: constant\n    swift_task:\n      SwiftObjects.create_container_and_object_then_delete_all:\n        -\n          args:\n            object_size: 102400\n            objects_per_container: 5\n          context:\n            roles:\n              - admin\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 4\n            type: constant\n      SwiftObjects.create_container_and_object_then_download_object:\n        -\n          args:\n            object_size: 1024\n            objects_per_container: 5\n          context:\n            roles:\n              - admin\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 3\n            times: 6\n            type: constant\n      SwiftObjects.create_container_and_object_then_list_objects:\n        -\n          args:\n            object_size: 5120\n            objects_per_container: 2\n          context:\n            roles:\n              - admin\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 2\n            type: constant\n      
SwiftObjects.list_and_download_objects_in_containers:\n        -\n          context:\n            roles:\n              - admin\n            swift_objects:\n              containers_per_tenant: 2\n              object_size: 10240\n              objects_per_container: 5\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 2\n            times: 2\n            type: constant\n      SwiftObjects.list_objects_in_containers:\n        -\n          context:\n            roles:\n              - admin\n            swift_objects:\n              containers_per_tenant: 1\n              object_size: 1024\n              objects_per_container: 10\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          runner:\n            concurrency: 3\n            times: 6\n            type: constant\n\ntls:\n  identity: false\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  configmap_tasks: true\n  configmap_test_templates: true\n  job_bootstrap: true\n  job_db_init: true\n  job_image_repo_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_manage_db: true\n  job_run_task: true\n  pvc_rally: true\n  secret_db: true\n  secret_keystone: true\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n 
 #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "redis/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v4.0.1\ndescription: OpenStack-Helm Redis\nname: redis\nversion: 2025.2.0\nhome: https://github.com/redis/redis\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "redis/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: redis-bin\ndata:\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n  redis-test.sh: |\n{{ tuple \"test/_redis_test.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  python-tests.py: |\n{{ tuple \"test/_python_redis_tests.py.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "redis/templates/deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"redis\" }}\n{{ tuple $envAll \"redis\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: redis\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"redis\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.server }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"redis\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"redis\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"redis\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.redis.node_selector_key }}: {{ .Values.labels.redis.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"redis\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: redis\n{{ tuple $envAll \"redis\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.server | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - redis-server\n            - --port\n            - {{ .Values.network.port | quote }}\n          ports:\n            - containerPort: {{ .Values.network.port }}\n          readinessProbe:\n            tcpSocket:\n              port: {{ .Values.network.port }}\n{{- end }}\n"
  },
  {
    "path": "redis/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "redis/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"redis\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "redis/templates/pod_test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.helm_tests }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := print .Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{.Release.Name}}-test\"\n  labels:\n{{ tuple $envAll \"redis\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  serviceAccountName: {{ $serviceAccountName }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  restartPolicy: Never\n  containers:\n    - name: {{.Release.Name}}-helm-tests\n{{ tuple $envAll \"helm_tests\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      command:\n        - /tmp/redis-test.sh\n      env:\n        - name: REDIS_HOST\n          value: \"redis\"\n        - name: REDIS_PORT\n          value: \"{{ .Values.network.port }}\"\n        - name: REDIS_DB\n          value: '0'\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: redis-test\n          mountPath: /tmp/redis-test.sh\n       
   subPath: redis-test.sh\n        - name: redis-python\n          mountPath: /tmp/python-tests.py\n          subPath: python-tests.py\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: redis-test\n      configMap:\n        name: redis-bin\n        defaultMode: 0555\n    - name: redis-python\n      configMap:\n        name: redis-bin\n        defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "redis/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "redis/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: redis\nspec:\n  clusterIP: None\n  ports:\n    - port: {{ .Values.network.port }}\n  selector:\n{{ tuple $envAll \"redis\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "redis/templates/test/_python_redis_tests.py.tpl",
    "content": "import os\nimport redis\n\n\nclass RedisTest(object):\n\n    def __init__(self):\n        host = os.environ.get('REDIS_HOST', 'redis')\n        port = os.environ.get('REDIS_PORT', 6379)\n        db = os.environ.get('REDIS_DB', 0)\n        self.redis_conn = redis.Redis(host, port, db)\n\n    def test_connection(self):\n        ping = self.redis_conn.ping()\n        if not ping: raise Exception('No connection to database')\n        print(\"Successfully connected to database\")\n\n    def database_info(self):\n        ip_port = []\n        for client in self.redis_conn.client_list():\n            ip_port.append(client[\"addr\"])\n        print(ip_port)\n        if not self.redis_conn.client_list():\n            raise Exception('Database client list is null')\n        return ip_port\n\n    def test_insert_delete_data(self):\n        key = \"test\"\n        value = \"it's working\"\n        result_set = self.redis_conn.set(key, value)\n        if not result_set: raise Exception('ERROR: SET command failed')\n        print(\"Successfully SET keyvalue pair\")\n        result_get = self.redis_conn.get(key)\n        if not result_get: raise Exception('ERROR: GET command failed')\n        print(\"Successfully GET keyvalue pair\")\n        db_size = self.redis_conn.dbsize()\n        if db_size <= 0: raise Exception(\"Database size not valid\")\n        result_delete = self.redis_conn.delete(key)\n        if not result_delete == 1: raise Exception(\"Error: Delete command failed\")\n        print(\"Successfully DELETED keyvalue pair\")\n\n    def test_client_kill(self, client_ip_port_list):\n        for client_ip_port in client_ip_port_list:\n            result = self.redis_conn.client_kill(client_ip_port)\n            if not result: raise Exception('Client failed to be removed')\n            print(\"Successfully DELETED client\")\n\n\nclient_ip_port = []\nredis_client = RedisTest()\nredis_client.test_connection()\nclient_ip_port = redis_client.database_info()\nredis_client.test_insert_delete_data()\nredis_client.test_client_kill(client_ip_port)\n"
  },
  {
    "path": "redis/templates/test/_redis_test.sh.tpl",
    "content": "#!/bin/bash\nset -ex\n\necho \"Start Redis Test\"\necho \"Print Environmental variables\"\necho $REDIS_HOST\necho $REDIS_PORT\necho $REDIS_DB\n\npython /tmp/python-tests.py\n"
  },
  {
    "path": "redis/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for redis.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nimages:\n  tags:\n    redis: docker.io/library/redis:4.0.1\n    helm_tests: docker.io/redislabs/redis-py:latest\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: quay.io/airshipit/docker:27.5.0\n  pull_policy: IfNotPresent\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\npod:\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  replicas:\n    server: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n  resources:\n    enabled: false\n    server:\n      limits:\n        memory: \"128Mi\"\n        cpu: \"500m\"\n      requests:\n        memory: \"128Mi\"\n        cpu: \"500m\"\n    jobs:\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nlabels:\n  redis:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nnetwork:\n  port: 6379\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - redis-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    redis:\n      services: null\n\nsecrets:\n  oci_image_registry:\n    redis: redis-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      redis:\n        username: redis\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n\nmanifests:\n  configmap_bin: true\n  deployment: true\n  job_image_repo_sync: true\n  secret_registry: true\n  service: true\n  helm_tests: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "registry/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v2.0.0\ndescription: OpenStack-Helm Docker Registry\nname: registry\nversion: 2025.2.0\nhome: https://github.com/kubernetes/ingress\nsources:\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "registry/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n\nIFS=',' ; for IMAGE in ${PRELOAD_IMAGES}; do\n  docker pull ${IMAGE}\n  docker tag ${IMAGE} ${LOCAL_REPO}/${IMAGE}\n  docker push ${LOCAL_REPO}/${IMAGE}\ndone\n"
  },
  {
    "path": "registry/templates/bin/_registry-proxy.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec nginx -g \"daemon off;\"\n"
  },
  {
    "path": "registry/templates/bin/_registry.sh.tpl",
    "content": "#!/bin/sh\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec registry serve /etc/docker/registry/config.yml\n"
  },
  {
    "path": "registry/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: registry-bin\ndata:\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  registry.sh: |\n{{ tuple \"bin/_registry.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  registry-proxy.sh: |\n{{ tuple \"bin/_registry-proxy.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "registry/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.registry.http.addr -}}\n{{ $_ := cat \"0.0.0.0\" (tuple \"docker_registry\" \"internal\" \"registry\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\") | replace \" \" \":\" | set .Values.conf.registry.http \"addr\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.registry.redis.addr -}}\n{{ $_ := tuple \"redis\" \"internal\" \"redis\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.registry.redis \"addr\" -}}\n{{- end -}}\n\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: registry-etc\ndata:\n  config.yml: |\n{{  toYaml .Values.conf.registry | indent 4 }}\n  default.conf: |\n{{ tuple \"etc/_default.conf.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "registry/templates/daemonset-registry-proxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.daemonset_registry_proxy }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"docker-registry-proxy\" }}\n{{ tuple $envAll \"registry_proxy\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: docker-registry-proxy\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"docker\" \"registry-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"docker\" \"registry-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"docker\" \"registry-proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"registry_proxy\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      nodeSelector:\n        {{ .Values.labels.registry.node_selector_key }}: {{ .Values.labels.registry.node_selector_value | quote }}\n      dnsPolicy: {{ .Values.pod.dns_policy }}\n      hostNetwork: true\n      initContainers:\n{{ tuple $envAll \"registry_proxy\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: registry-proxy\n{{ tuple $envAll \"registry_proxy\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.registry_proxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"registry_proxy\" \"container\" \"registry_proxy\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/registry-proxy.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: registry-bin\n              mountPath: /tmp/registry-proxy.sh\n              subPath: registry-proxy.sh\n              readOnly: true\n            - name: registry-etc\n              mountPath: /etc/nginx/conf.d/default.conf\n              subPath: default.conf\n              readOnly: true\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: registry-bin\n          configMap:\n            name: registry-bin\n            defaultMode: 0555\n        - name: registry-etc\n          configMap:\n            name: registry-etc\n            defaultMode: 0444\n{{- end }}\n"
  },
  {
    "path": "registry/templates/deployment-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_registry }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"docker-registry\" }}\n{{ tuple $envAll \"registry\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: docker-registry\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"docker\" \"registry\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.registry }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"docker\" \"registry\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"docker\" \"registry\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"registry\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"docker\" \"registry\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.registry.node_selector_key }}: {{ .Values.labels.registry.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"registry\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: registry\n{{ tuple $envAll \"registry\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.registry | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"registry\" \"container\" \"registry\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          ports:\n            - name: d-reg\n              containerPort: {{ tuple \"docker_registry\" \"internal\" \"registry\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          command:\n            - /tmp/registry.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: registry-bin\n              mountPath: /tmp/registry.sh\n              subPath: registry.sh\n              readOnly: true\n            - name: registry-etc\n              mountPath: /etc/docker/registry/config.yml\n              subPath: config.yml\n              readOnly: true\n            - name: docker-images\n              mountPath: {{ .Values.conf.registry.storage.filesystem.rootdirectory }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: registry-bin\n          configMap:\n            name: registry-bin\n            defaultMode: 0555\n        - name: registry-etc\n          configMap:\n            name: registry-etc\n            defaultMode: 0444\n        - name: docker-images\n          persistentVolumeClaim:\n            claimName: docker-images\n{{- end }}\n"
  },
  {
    "path": "registry/templates/etc/_default.conf.tpl",
    "content": "# Docker registry proxy for api version 2\n\nupstream docker-registry {\n    server {{ tuple \"docker_registry\" \"internal\" \"registry\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" }};\n}\n\n# No client auth or TLS\n# TODO(bacongobbler): experiment with authenticating the registry if it's using TLS\nserver {\n    listen {{ tuple \"docker_registry\" \"public\" \"registry\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }};\n    server_name localhost;\n\n    # disable any limits to avoid HTTP 413 for large image uploads\n    client_max_body_size 0;\n\n    # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)\n    chunked_transfer_encoding on;\n\n    location / {\n        # Do not allow connections from docker 1.5 and earlier\n        # docker pre-1.6.0 did not properly set the user agent on ping, catch \"Go *\" user agents\n        if ($http_user_agent ~ \"^(docker\\/1\\.(3|4|5(?!\\.[0-9]-dev))|Go ).*$\" ) {\n            return 404;\n        }\n\n        include docker-registry.conf;\n    }\n}\n"
  },
  {
    "path": "registry/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "registry/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_bootstrap }}\n{{- $envAll := . }}\n{{- if .Values.bootstrap.enabled }}\n\n{{- $serviceAccountName := \"docker-bootstrap\" }}\n{{ tuple $envAll \"bootstrap\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: docker-bootstrap\n  labels:\n{{ tuple $envAll \"docker\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"docker\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value | quote }}\n      initContainers:\n{{ tuple $envAll \"bootstrap\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\"  | indent 8 }}\n      containers:\n        - name: docker-bootstrap\n{{ tuple $envAll \"bootstrap\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          env:\n            - name: LOCAL_REPO\n              value: \"localhost:{{ tuple \"docker_registry\" \"public\" \"registry\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\"\n            - name: PRELOAD_IMAGES\n              value: \"{{  include \"helm-toolkit.utils.joinListWithComma\" .Values.bootstrap.preload_images }}\"\n          command:\n            - /tmp/bootstrap.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: registry-bin\n              mountPath: /tmp/bootstrap.sh\n              subPath: bootstrap.sh\n              readOnly: true\n            - name: docker-socket\n              mountPath: /var/run/docker.sock\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: registry-bin\n          configMap:\n            name: registry-bin\n            defaultMode: 0555\n        - name: docker-socket\n          hostPath:\n            path: /var/run/docker.sock\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "registry/templates/pvc-images.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pvc_images }}\n{{- $envAll := . }}\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: docker-images\nspec:\n  accessModes: [\"ReadWriteOnce\"]\n  resources:\n    requests:\n      storage: {{ .Values.volume.size }}\n  {{- if ne .Values.volume.class_name \"default\" }}\n  storageClassName: {{ .Values.volume.class_name }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "registry/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "registry/templates/service-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_registry }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"docker_registry\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: d-reg\n      port: {{ tuple \"docker_registry\" \"internal\" \"registry\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.registry.node_port.enabled }}\n      nodePort: {{ .Values.network.registry.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"docker\" \"registry\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.registry.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "registry/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for docker registry.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nlabels:\n  registry:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nrelease_group: null\n\nimages:\n  tags:\n    registry: docker.io/library/registry:2\n    registry_proxy: registry.k8s.io/kube-registry-proxy:0.4\n    bootstrap: docker.io/library/docker:29\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n\nvolume:\n  class_name: general\n  size: 2Gi\n\nnetwork:\n  registry:\n    ingress:\n      public: false\n    node_port:\n      enabled: false\n      port: 5000\n\nconf:\n  registry:\n    version: 0.1\n    log:\n      fields:\n        service: registry\n    storage:\n      cache:\n        blobdescriptor: redis\n      filesystem:\n        rootdirectory: /var/lib/registry\n    http:\n      secret: not-so-secret-secret\n      headers:\n        X-Content-Type-Options: [nosniff]\n    health:\n      storagedriver:\n        enabled: true\n        interval: 10s\n        threshold: 3\n    redis:\n      addr: null\n\npod:\n  security_context:\n    registry_proxy:\n      pod:\n        runAsUser: 65534\n      container:\n        registry_proxy:\n          runAsUser: 0\n          readOnlyRootFilesystem: false\n    registry:\n      pod:\n        runAsUser: 65534\n      container:\n        registry:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  dns_policy: \"ClusterFirstWithHostNet\"\n  replicas:\n    registry: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n  resources:\n    enabled: false\n    registry:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    registry_proxy:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nbootstrap:\n  enabled: true\n  script:\n    docker info\n  preload_images:\n    - quay.io/airshipit/nginx:alpine3.18\n\ndependencies:\n  static:\n    bootstrap:\n      pod:\n        # NOTE(srwilkers): As the daemonset dependency is currently broken for\n        # kubernetes 1.16, use the pod dependency and require the same node\n        # instead for the same result\n        - requireSameNode: true\n          labels:\n            application: docker\n            component: registry-proxy\n      services:\n        - endpoint: internal\n          service: docker_registry\n    registry:\n      services:\n        - endpoint: internal\n          service: redis\n    registry_proxy:\n      services:\n        - endpoint: internal\n          service: docker_registry\n\nsecrets:\n  oci_image_registry:\n    registry: registry-oci-image-registry-key\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      registry:\n        username: registry\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  docker_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: 5000\n  redis:\n    namespace: null\n    hosts:\n      default: redis\n    host_fqdn_override:\n      default: null\n    port:\n      redis:\n        default: 6379\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  daemonset_registry_proxy: true\n  deployment_registry: true\n  job_bootstrap: true\n  job_image_repo_sync: true\n  pvc_images: true\n  secret_registry: true\n  service_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "release.asc",
    "content": "-----BEGIN PGP PUBLIC KEY BLOCK-----\nVersion: GnuPG v1\n\nmQINBFX4hgkBEADLqn6O+UFp+ZuwccNldwvh5PzEwKUPlXKPLjQfXlQRig1flpCH\nE0HJ5wgGlCtYd3Ol9f9+qU24kDNzfbs5bud58BeE7zFaZ4s0JMOMuVm7p8JhsvkU\nC/Lo/7NFh25e4kgJpjvnwua7c2YrA44ggRb1QT19ueOZLK5wCQ1mR+0GdrcHRCLr\n7Sdw1d7aLxMT+5nvqfzsmbDullsWOD6RnMdcqhOxZZvpay8OeuK+yb8FVQ4sOIzB\nFiNi5cNOFFHg+8dZQoDrK3BpwNxYdGHsYIwU9u6DWWqXybBnB9jd2pve9PlzQUbO\neHEa4Z+jPqxY829f4ldaql7ig8e6BaInTfs2wPnHJ+606g2UH86QUmrVAjVzlLCm\nnqoGymoAPGA4ObHu9X3kO8viMBId9FzooVqR8a9En7ZE0Dm9O7puzXR7A1f5sHoz\nJdYHnr32I+B8iOixhDUtxIY4GA8biGATNaPd8XR2Ca1hPuZRVuIiGG9HDqUEtXhV\nfY5qjTjaThIVKtYgEkWMT+Wet3DPPiWT3ftNOE907e6EWEBCHgsEuuZnAbku1GgD\nLBH4/a/yo9bNvGZKRaTUM/1TXhM5XgVKjd07B4cChgKypAVHvef3HKfCG2U/DkyA\nLjteHt/V807MtSlQyYaXUTGtDCrQPSlMK5TjmqUnDwy6Qdq8dtWN3DtBWQARAQAB\ntCpDZXBoLmNvbSAocmVsZWFzZSBrZXkpIDxzZWN1cml0eUBjZXBoLmNvbT6JAjgE\nEwECACIFAlX4hgkCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOhKwsBG\nDzmUXdIQAI8YPcZMBWdv489q8CzxlfRIRZ3Gv/G/8CH+EOExcmkVZ89mVHngCdAP\nDOYCl8twWXC1lwJuLDBtkUOHXNuR5+Jcl5zFOUyldq1Hv8u03vjnGT7lLJkJoqpG\nl9QD8nBqRvBU7EM+CU7kP8+09b+088pULil+8x46PwgXkvOQwfVKSOr740Q4J4nm\n/nUOyTNtToYntmt2fAVWDTIuyPpAqA6jcqSOC7Xoz9cYxkVWnYMLBUySXmSS0uxl\n3p+wK0lMG0my/gb+alke5PAQjcE5dtXYzCn+8Lj0uSfCk8Gy0ZOK2oiUjaCGYN6D\nu72qDRFBnR3jaoFqi03bGBIMnglGuAPyBZiI7LJgzuT9xumjKTJW3kN4YJxMNYu1\nFzmIyFZpyvZ7930vB2UpCOiIaRdZiX4Z6ZN2frD3a/vBxBNqiNh/BO+Dex+PDfI4\nTqwF8zlcjt4XZ2teQ8nNMR/D8oiYTUW8hwR4laEmDy7ASxe0p5aijmUApWq5UTsF\n+s/QbwugccU0iR5orksM5u9MZH4J/mFGKzOltfGXNLYI6D5Mtwrnyi0BsF5eY0u6\nvkdivtdqrq2DXY+ftuqLOQ7b+t1RctbcMHGPptlxFuN9ufP5TiTWSpfqDwmHCLsT\nk2vFiMwcHdLpQ1IH8ORVRgPPsiBnBOJ/kIiXG2SxPUTjjEGOVgeA\n=/Tod\n-----END PGP PUBLIC KEY BLOCK-----\n"
  },
  {
    "path": "releasenotes/config.yaml",
    "content": "---\n# The 2024.2.0 is the release when we introduced CHANGELOG.md\n# When we generate the CHANGELOG.md we append all earlier release notes as well.\n# So the earliest release we should take while generating the CHANGELOG.md\n# is 2024.2.0.\nearliest_version: 2024.2.0\nbranch: master\ncollapse_pre_releases: false\nstop_at_branch_base: true\nsections:\n  - [aodh, aodh Chart]\n  - [barbican, barbican Chart]\n  - [blazar, blazar Chart]\n  - [ca-issuer, ca-issuer Chart]\n  - [calico, calico Chart]\n  - [ceilometer, ceilometer Chart]\n  - [ceph-client, ceph-client Chart]\n  - [ceph-mon, ceph-mon Chart]\n  - [ceph-osd, ceph-osd Chart]\n  - [ceph-provisioners, ceph-provisioners Chart]\n  - [cinder, cinder Chart]\n  - [cloudkitty, cloudkitty Chart]\n  - [designate, designate Chart]\n  - [elastic-apm-server, elastic-apm-server Chart]\n  - [elastic-filebeat, elastic-filebeat Chart]\n  - [elastic-metricbeat, elastic-metricbeat Chart]\n  - [elastic-packetbeat, elastic-packetbeat Chart]\n  - [elasticsearch, elasticsearch Chart]\n  - [etcd, etcd Chart]\n  - [fluentbit, fluentbit Chart]\n  - [fluentd, fluentd Chart]\n  - [freezer, freezer Chart]\n  - [glance, glance Chart]\n  - [gnocchi, gnocchi Chart]\n  - [grafana, grafana Chart]\n  - [heat, heat Chart]\n  - [helm-toolkit, helm-toolkit Chart]\n  - [horizon, horizon Chart]\n  - [ingress, ingress Chart]\n  - [ironic, ironic Chart]\n  - [keystone, keystone Chart]\n  - [kibana, kibana Chart]\n  - [kube-dns, kube-dns Chart]\n  - [kubernetes-keystone-webhook, kubernetes-keystone-webhook Chart]\n  - [kubernetes-node-problem-detector, kubernetes-node-problem-detector Chart]\n  - [ldap, ldap Chart]\n  - [libvirt, libvirt Chart]\n  - [local-storage, local-storage Chart]\n  - [magnum, magnum Chart]\n  - [mariadb, mariadb Chart]\n  - [memcached, memcached Chart]\n  - [mistral, mistral Chart]\n  - [nagios, nagios Chart]\n  - [namespace-config, namespace-config Chart]\n  - [neutron, neutron Chart]\n  - [nfs-provisioner, 
nfs-provisioner Chart]\n  - [nova, nova Chart]\n  - [octavia, octavia Chart]\n  - [openvswitch, openvswitch Chart]\n  - [ovn, ovn Chart]\n  - [placement, placement Chart]\n  - [postgresql, postgresql Chart]\n  - [powerdns, powerdns Chart]\n  - [prometheus-alertmanager, prometheus-alertmanager Chart]\n  - [prometheus-blackbox-exporter, prometheus-blackbox-exporter Chart]\n  - [prometheus-kube-state-metrics, prometheus-kube-state-metrics Chart]\n  - [prometheus-node-exporter, prometheus-node-exporter Chart]\n  - [prometheus-openstack-exporter, prometheus-openstack-exporter Chart]\n  - [prometheus-process-exporter, prometheus-process-exporter Chart]\n  - [prometheus, prometheus Chart]\n  - [rabbitmq, rabbitmq Chart]\n  - [rally, rally Chart]\n  - [redis, redis Chart]\n  - [registry, registry Chart]\n  - [tacker, tacker Chart]\n  - [tempest, tempest Chart]\n  - [trove, trove Chart]\n  - [watcher, watcher Chart]\n  - [features, New Features]\n  - [issues, Known Issues]\n  - [upgrade, Upgrade Notes]\n  - [api, API Changes]\n  - [security, Security Issues]\n  - [fixes, Bug Fixes]\n  - [zaqar, zaqar Chart]\ntemplate: |\n  ---\n  # To create a new release note related to a specific chart:\n  # reno new <chart_name>\n  #\n  # To create a new release note for a common change (when multiple charts\n  # are changed):\n  # reno new common\n  <chart_name>:\n    - |\n      Describe changes here, or remove this section. This paragraph will appear in\n      the unnamed section of the <chart_name>/CHANGELOG.md for a given version.\n  features:\n    - |\n      List new features here, or remove this section. If this section is given\n      in the releasenotes/notes/<chart_name>-<sha>.yaml it will only appear in the\n      \"New Features\" section of the <chart_name>/CHANGELOG.md. 
If this section is\n      given in the releasenotes/notes/common-<sha>.yaml it will appear in the\n      CHANGELOG.md files of all charts.\n  issues:\n    - |\n      List known issues here, or remove this section. If this section is given\n      in the releasenotes/notes/<chart_name>-<sha>.yaml it will only appear in the\n      \"Known Issues\" section of the <chart_name>/CHANGELOG.md. If this section is\n      given in the releasenotes/notes/common-<sha>.yaml it will appear in the\n      CHANGELOG.md files of all charts.\n  upgrade:\n    - |\n      List upgrade notes here, or remove this section. If this section is given\n      in the releasenotes/notes/<chart_name>-<sha>.yaml it will only appear in the\n      \"Upgrade Notes\" section of the <chart_name>/CHANGELOG.md. If this section is\n      given in the releasenotes/notes/common-<sha>.yaml it will appear in the\n      CHANGELOG.md files of all charts.\n  api:\n    - |\n      List API changes here (e.g. values format), or remove this section. If this section is given\n      in the releasenotes/notes/<chart_name>-<sha>.yaml it will only appear in the\n      \"API Changes\" section of the <chart_name>/CHANGELOG.md. If this section is\n      given in the releasenotes/notes/common-<sha>.yaml it will appear in the\n      CHANGELOG.md files of all charts.\n  security:\n    - |\n      List security issues here, or remove this section. If this section is given\n      in the releasenotes/notes/<chart_name>-<sha>.yaml it will only appear in the\n      \"Security Issues\" section of the <chart_name>/CHANGELOG.md. If this section is\n      given in the releasenotes/notes/common-<sha>.yaml it will appear in the\n      CHANGELOG.md files of all charts.\n  fixes:\n    - |\n      List bug fixes here, or remove this section. If this section is given\n      in the releasenotes/notes/<chart_name>-<sha>.yaml it will only appear in the\n      \"Bug Fixes\" section of the <chart_name>/CHANGELOG.md. 
If this section is\n      given in the releasenotes/notes/common-<sha>.yaml it will appear in the\n      CHANGELOG.md files of all charts.\n  ...\n...\n"
  },
  {
    "path": "releasenotes/notes/added-nova-uid-parameter-to-ovs-chart-41d2b05b79300a31.yaml",
    "content": "---\nother:\n  - |\n    When running openvswitch (OVS) with DPDK enabled, vhost-user sockets are\n    used to connect VMs to OVS. nova-compute needs access to those sockets in\n    order to plug them into OVS. For this reason, the directory containing\n    vhost-user sockets must have proper permissions. The openvswitch chart now\n    sets ownership of this directory to the UID of the nova user. The OVS chart\n    uses the same default as the Nova chart (42424). However, if the Nova UID\n    is changed in the Nova chart in a particular deployment, it also needs to\n    be changed in the OVS chart correspondingly if DPDK is used.\n...\n"
  },
  {
    "path": "releasenotes/notes/aodh-0fe345390dd08642.yaml",
    "content": "---\naodh:\n  - |\n    Remove outdated default kolla images and use quay.io/airshipit/aodh:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "releasenotes/notes/aodh-1002dad350db1c60.yaml",
    "content": "---\naodh:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/aodh/aodh.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/aodh-bb91c011b0c7d911.yaml",
    "content": "---\naodh:\n  - |\n    Add support for etcSources to db-sync job.\n...\n"
  },
  {
    "path": "releasenotes/notes/aodh.yaml",
    "content": "---\naodh:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Use policies in yaml format\n  - 0.2.2 Update htk requirements repo\n  - 0.2.3 Enable taint toleration for Openstack services\n  - 0.2.4 Migrated CronJob resource to batch/v1 API version & PodDisruptionBudget to policy/v1\n  - 0.2.5 Added OCI registry authentication\n  - 0.2.6 Remove default policy rules\n  - 0.2.7 Replace node-role.kubernetes.io/master with control-plane\n  - 0.2.8 Define service_type in keystone_authtoken to support application credentials with access rules\n  - 0.2.9 Enable custom annotations for Openstack pods\n  - 0.2.10 Enable custom annotations for Openstack secrets\n  - 0.2.11 Update images used by default\n  - 0.2.12 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.2.13 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/barbican-669168de73ab5847.yaml",
    "content": "---\nbarbican:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/barbican/barbican.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/barbican-d291498fada9e601.yaml",
    "content": "---\nbarbican:\n  - |\n    Removed the autocommit option, which has been deprecated\n    since SQLAlchemy 2.0.\n...\n"
  },
  {
    "path": "releasenotes/notes/barbican-ead8061b2a6b1b1b.yaml",
    "content": "---\nbarbican:\n  - Use more standard DB config setting\n...\n"
  },
  {
    "path": "releasenotes/notes/barbican.yaml",
    "content": "---\nbarbican:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Added post-install and post-upgrade helm hook for Jobs\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Use policies in yaml format\n  - 0.2.2 Add helm hook conditional\n  - 0.2.3 Add support for master kek rotation\n  - 0.2.4 Add Ussuri release support\n  - 0.2.5 Add Victoria and Wallaby releases support\n  - 0.2.6 Allow Barbican to talk to Mariadb over TLS\n  - 0.2.7 Fix db connection key name\n  - 0.2.8 Update htk requirements repo\n  - 0.2.9 Removed default policy in favor in code policy\n  - 0.2.10 Enable taint toleration for Openstack services\n  - 0.2.11 Fix job annotations for db init job\n  - 0.2.12 Remove older values overrides\n  - 0.2.13 Migrated PodDisruptionBudget resource to policy/v1 API version\n  - 0.2.14 Add Xena and Yoga values overrides\n  - 0.2.15 Added OCI registry authentication\n  - 0.2.16 Distinguish between port number of internal endpoint and binding port number\n  - 0.2.17 Use HTTP probe instead of TCP probe\n  - 0.2.18 Support TLS for ks jobs\n  - 0.2.19 Support SSL offloading at reverse proxy for internal and admin endpoints\n  - 0.3.0 Remove support for Train and Ussuri\n  - 0.3.1 Replace node-role.kubernetes.io/master with control-plane\n  - 0.3.2 Define service_type in keystone_authtoken to support application credentials with access rules\n  - 0.3.3 Add Zed overrides\n  - 0.3.4 Add 2023.1 overrides\n  - 0.3.5 Add Ubuntu Jammy overrides\n  - 0.3.6 Add 2023.2 Ubuntu Jammy overrides\n  - 0.3.7 Fix TLS connection to rabbitmq, and generate barbican certificate\n  - 0.3.8 Make barbican TLS configuration granular\n  - 0.3.9 Enable custom annotations for Openstack pods\n  - 0.3.10 Add 2024.1 overrides\n  - 0.3.11 Enable custom annotations for Openstack secrets\n  - 0.3.12 Update images used by default\n  - 0.3.13 Sync uWSGI config to other services\n  - 0.3.14 Use 
quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.3.15 Add 2024.2 Ubuntu Jammy overrides\n  - 0.3.16 Add livenessProbe and change path in probe\n  - 0.3.17 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/blazar-73cedded47699964.yaml",
    "content": "---\nblazar:\n  - |\n    Added initial support for OpenStack Blazar reservation service deployment\n    in Kubernetes environments through Helm charts. This enables users to\n    deploy and manage Blazar services alongside other OpenStack components.\nfeatures:\n  - |\n    Introduced Blazar Helm chart with support for:\n    - Blazar API and Manager service deployment and configuration\n    - Integration with existing OpenStack identity services\n    - Support for custom Blazar configuration through values.yaml\n...\n"
  },
  {
    "path": "releasenotes/notes/blazar-a7b9b29ba15720c0.yaml",
    "content": "---\nblazar:\n  - |\n    Updating values.yaml for adding support for creating flavor-based instance reservations.\n...\n"
  },
  {
    "path": "releasenotes/notes/blazar-b7fc5016b49c8f59.yaml",
    "content": "---\nblazar:\n  - |\n    Updating secret-keystone.yaml to make it similar to other services. Also updating\n    values.yaml to use unique ports and disabling node_port to make it similar to other\n    services, and for DNS lookups and ingress support.\n...\n"
  },
  {
    "path": "releasenotes/notes/ca-clusterissuer.yaml",
    "content": "---\nca-clusterissuer:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Update htk requirements\n  - 0.1.2 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/ca-issuer.yaml",
    "content": "---\nca-issuer:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Update apiVersion of Issuer to v1\n  - 0.1.3 Revert - Update apiVersion of Issuer to v1\n  - 0.2.0 Only Cert-manager version v1.0.0 or greater will be supported\n  - 0.2.1 Cert-manager \"< v1.0.0\" supports cert-manager.io/v1alpha3 else use api cert-manager.io/v1\n  - 0.2.2 Update htk requirements\n  - 0.2.3 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/ceilometer-15768e1064d3339d.yaml",
    "content": "---\nceilometer:\n  - |\n    Support for ceilometer-collector service was removed. This service was\n    removed from ceilometer during queens cycle.\n...\n"
  },
  {
    "path": "releasenotes/notes/ceilometer-8fc69d6664cdf101.yaml",
    "content": "---\nceilometer:\n  - |\n    Removed metrics which require Intel CMT perf events.\n...\n"
  },
  {
    "path": "releasenotes/notes/ceilometer-ab177a5c0aad98df.yaml",
    "content": "---\nceilometer:\n  - |\n    Removed support for ceilometer-api, which was removed from ceilometer long\n    time ago.\n  - |\n    Removed support for oslo.db backend and mongodb backend of ceilometer,\n    which haven't been supported by ceulometer actually for multiple releases.\n...\n"
  },
  {
    "path": "releasenotes/notes/ceilometer-b03ea218e1e61f90.yaml",
    "content": "---\nceilometer:\n  - |\n    Remove outdated default kolla images and use quay.io/airshipit/ceilometer:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "releasenotes/notes/ceilometer-b86532145d088208.yaml",
    "content": "---\nceilometer:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/ceilometer/ceilometer.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/ceilometer-c08f029ffa1e122f.yaml",
    "content": "---\nceilometer:\n  - |\n    Add support for etcSources to db-sync job.\n...\n"
  },
  {
    "path": "releasenotes/notes/ceilometer.yaml",
    "content": "---\nceilometer:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Use policies in yaml format\n  - 0.2.2 Update htk requirements repo\n  - 0.2.3 Enable taint toleration for Openstack services\n  - 0.2.4 Update default image values to Wallaby\n  - 0.2.5 Migrated PodDisruptionBudget resource to policy/v1 API version\n  - 0.2.6 Added OCI registry authentication\n  - 0.2.7 Remove default policy rules\n  - 0.2.8 Replace node-role.kubernetes.io/master with control-plane\n  - 0.2.9 Enable custom annotations for Openstack pods\n  - 0.2.10 Enable custom annotations for Openstack secrets\n  - 0.2.11 Update images used by default\n  - 0.2.12 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.2.13 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-adapter-rook-2fd83689f9bf78fb.yaml",
    "content": "---\nceph-adapter-rook:\n  - |\n    Update Ceph to Tentacle 20.2.1\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-adapter-rook-f0855e8843fe615f.yaml",
    "content": "---\nceph-adapter-rook:\n  - |\n    Update Ceph to Tentacle 20.2.0 and replaced image sources from docker.io/openstackhelm with quay.io/airshipit\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-adapter-rook.yaml",
    "content": "---\nceph-adapter-rook:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Update Ceph images to Jammy and Reef 18.2.1\n  - 0.1.2 Update Ceph images to patched 18.2.2 and restore debian-reef repo\n  - 0.1.3 Simplify and remove unnecessary entities\n  - 0.1.4 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.5 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-client-055d675e86b2d0ea.yaml",
    "content": "---\nceph-client:\n  - |\n    Update Ceph to Tentacle 20.2.1\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-client-f4c8397a4313c53a.yaml",
    "content": "---\nceph-client:\n  - |\n    Update Ceph to Tentacle 20.2.0 and replaced image sources from docker.io/openstackhelm with quay.io/airshipit\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-client.yaml",
    "content": "---\nceph-client:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 fix the logic to disable the autoscaler on pools\n  - 0.1.3 Run as ceph user and disallow privilege escalation\n  - 0.1.4 Improvements for ceph-client helm tests\n  - 0.1.5 Fix Helm test check_pgs() check for inactive PGs\n  - 0.1.6 Uplift from Nautilus to Octopus release\n  - 0.1.7 Don't wait for premerge PGs in the rbd pool job\n  - 0.1.8 enhance logic to enable the autoscaler for Octopus\n  - 0.1.9 Revert \"[ceph-client] enhance logic to enable the autoscaler for Octopus\"\n  - 0.1.10 Separate pool quotas from pg_num calculations\n  - 0.1.11 enhance logic to enable and disable the autoscaler\n  - 0.1.12 Disable autoscaling before pools are created\n  - 0.1.13 Fix ceph-client helm test\n  - 0.1.14 Allow Ceph RBD pool job to leave failed pods\n  - 0.1.15 Make ceph-client helm test more PG specific\n  - 0.1.16 Make Ceph pool init job consistent with helm test\n  - 0.1.17 Add pool rename support for Ceph pools\n  - 0.1.18 Add pool delete support for Ceph pools\n  - 0.1.19 Use full image ref for docker official images\n  - 0.1.20 Export crash dumps when Ceph daemons crash\n  - 0.1.21 Fix Ceph checkDNS script\n  - 0.1.22 Set pg_num_min in all cases\n  - 0.1.23 Helm 3 - Fix Job labels\n  - 0.1.24 Performance optimizations for the ceph-rbd-pool job\n  - 0.1.25 Update htk requirements\n  - 0.1.26 Fix ceph-rbd-pool deletion race\n  - 0.1.27 Update ceph_mon config to ips from fqdn\n  - 0.1.28 Fix ceph.conf update job labels, rendering\n  - 0.1.29 Consolidate mon_host discovery\n  - 0.1.30 Move ceph-mgr deployment to the ceph-mon chart\n  - 0.1.31 Consolidate mon_endpoints discovery\n  - 0.1.32 Simplify test rules for ceph-mgr deployment\n  - 0.1.33 More robust naming of clusterrole-checkdns\n  - 0.1.34 Migrated CronJob resource to batch/v1 API version\n  - 0.1.35 Handle multiple mon versions in the pool job\n  - 0.1.36 Add the ability to 
run Ceph commands from values\n  - 0.1.37 Added OCI registry authentication\n  - 0.1.38 Make use of noautoscale with Pacific\n  - 0.1.39 Correct check for too many OSDs in the pool job\n  - 0.1.40 Fix OSD count checks in the ceph-rbd-pool job\n  - 0.1.41 Allow gate scripts to use 1x replication in Ceph\n  - 0.1.42 Update all Ceph images to Focal\n  - 0.1.43 Document the use of mon_allow_pool_size_one\n  - 0.1.44 Allow pg_num_min to be overridden per pool\n  - 0.1.45 Update Ceph to 17.2.6\n  - 0.1.46 Strip any errors preceding pool properties JSON\n  - 0.1.47 Use Helm toolkit functions for Ceph probes\n  - 0.1.48 Update Rook to 1.12.5 and Ceph to 18.2.0\n  - 0.1.49 Update Ceph images to Jammy and Reef 18.2.1\n  - 0.1.50 Update Ceph images to patched 18.2.2 and restore debian-reef repo\n  - 0.1.51 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.52 Run utils-defragOSDs.sh in ceph-osd-default container\n  - 0.1.53 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-mon-1a1ecc38a96bfead.yaml",
    "content": "---\nceph-mon:\n  - |\n    Grant the bootstrap-osd client the necessary permissions to set cluster\n    config parameters.\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-mon-5ece5f0b0f571966.yaml",
    "content": "---\nceph-mon:\n  - |\n    Upgrade Ceph to 19.2.3 and adjust the ceph-mgr liveness probe to account\n    for a new asok status query handler that returns an empty dictionary.\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-mon-a1f450d714b90cfb.yaml",
    "content": "---\nceph-mon:\n  - |\n    Update Ceph to Tentacle 20.2.0 and replaced image sources from docker.io/openstackhelm with quay.io/airshipit\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-mon-f029c2a86a0b7edd.yaml",
    "content": "---\nceph-mon:\n  - |\n    Update Ceph to Tentacle 20.2.1\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-mon.yaml",
    "content": "---\nceph-mon:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency to >= 0.1.0\n  - 0.1.2 Enable shareProcessNamespace in mon daemonset\n  - 0.1.3 Run mon container as ceph user\n  - 0.1.4 Uplift from Nautilus to Octopus release\n  - 0.1.5 Add Ceph CSI plugin\n  - 0.1.6 Fix python3 issue for util scripts\n  - 0.1.7 remove deprecated svc annotation tolerate-unready-endpoints\n  - 0.1.8 Use full image ref for docker official images\n  - 0.1.9 Remove unnecessary parameters for ceph-mon\n  - 0.1.10 Export crash dumps when Ceph daemons crash\n  - 0.1.11 Correct mon-check executing binary and logic\n  - 0.1.12 Fix Ceph checkDNS script\n  - 0.1.13 Helm 3 - Fix Job labels\n  - 0.1.14 Update htk requirements\n  - 0.1.15 Prevent mon-check from removing mons when down temporarily\n  - 0.1.16 Correct Ceph Mon Check Ports\n  - 0.1.17 Skip monmap endpoint check for missing mons\n  - 0.1.18 Move ceph-mgr deployment to the ceph-mon chart\n  - 0.1.19 Add a post-apply job to restart mons after mgrs\n  - 0.1.20 Consolidate mon_endpoints discovery\n  - 0.1.21 Change configmap names to be based on release name\n  - 0.1.22 Correct configmap names for all resources\n  - 0.1.23 Release-specific ceph-template configmap name\n  - 0.1.24 Prevents mgr SA from repeated creation\n  - 0.1.25 Allow for unconditional mon restart\n  - 0.1.26 Added OCI registry authentication\n  - 0.1.27 Update all Ceph images to Focal\n  - 0.1.28 Document the use of mon_allow_pool_size_one\n  - 0.1.29 Update Ceph to 17.2.6\n  - 0.1.30 Use Helm tookkit functions for Ceph probes\n  - 0.1.31 Add Rook Helm charts for managing Ceph with Rook\n  - 0.1.32 Update Rook to 1.12.5 and Ceph to 18.2.0\n  - 0.1.33 Update Ceph images to Jammy and Reef 18.2.1\n  - 0.1.34 Update Ceph images to patched 18.2.2 and restore debian-reef repo\n  - 0.1.35 Use seprate secrets for CSI plugin and CSI provisioner\n  - 0.1.36 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.37 
Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-osd-294b73092b0b301b.yaml",
    "content": "---\nceph-osd:\n  - |\n    Add a config script to set Ceph configuration options from Helm values. In\n    some cases, settings in ceph.conf are not effective and must be set via 'ceph\n    config set' commands. This script is called during OSD initialization to set\n    these options.\n\n    Also, bluestore_elastic_shared_blobs=false is added to the Ceph config to work\n    around https://tracker.ceph.com/issues/70390.\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-osd-c897b82dd8d0104b.yaml",
    "content": "---\nceph-osd:\n  - |\n    Update Ceph to Tentacle 20.2.1\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-osd-e9bd9ab0cb036080.yaml",
    "content": "---\nceph-osd:\n  - |\n    Update Ceph to Tentacle 20.2.0 and replaced image sources from docker.io/openstackhelm with quay.io/airshipit\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-osd.yaml",
    "content": "---\nceph-osd:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency to >= 0.1.0\n  - 0.1.2 wait for only osd pods from post apply job\n  - 0.1.3 Search for complete logical volume name for OSD data volumes\n  - 0.1.4 Don't try to prepare OSD disks that are already deployed\n  - 0.1.5 Fix the sync issue between osds when using shared disk for metadata\n  - 0.1.6 Logic improvement for used osd disk detection\n  - 0.1.7 Synchronization audit for the ceph-volume osd-init script\n  - 0.1.8 Update post apply job\n  - 0.1.9 Check inactive PGs multiple times\n  - 0.1.10 Fix typo in check inactive PGs logic\n  - 0.1.11 Fix post-apply job failure related to fault tolerance\n  - 0.1.12 Add a check for misplaced objects to the post-apply job\n  - 0.1.13 Remove default OSD configuration\n  - 0.1.14 Alias synchronized commands and fix descriptor leak\n  - 0.1.15 Correct naming convention for logical volumes in disk_zap()\n  - 0.1.16 dmsetup remove logical devices using correct device names\n  - 0.1.17 Fix a bug with DB orphan volume removal\n  - 0.1.18 Uplift from Nautilus to Octopus release\n  - 0.1.19 Update rbac api version\n  - 0.1.20 Update directory-based OSD deployment for image changes\n  - 0.1.21 Refactor Ceph OSD Init Scripts - First PS\n  - 0.1.22 Refactor Ceph OSD Init Scripts - Second PS\n  - 0.1.23 Use full image ref for docker official images\n  - 0.1.24 Ceph OSD Init Improvements\n  - 0.1.25 Export crash dumps when Ceph daemons crash\n  - 0.1.26 Mount /var/crash inside ceph-osd pods\n  - 0.1.27 Limit Ceph OSD Container Security Contexts\n  - 0.1.28 Change var crash mount propagation to HostToContainer\n  - 0.1.29 Fix Ceph checkDNS script\n  - 0.1.30 Ceph OSD log-runner container should run as ceph user\n  - 0.1.31 Helm 3 - Fix Job labels\n  - 0.1.32 Update htk requirements\n  - 0.1.33 Update log-runner container for MAC\n  - 0.1.34 Remove wait for misplaced objects during OSD restarts\n  - 0.1.35 Consolidate mon_endpoints discovery\n 
 - 0.1.36 Add OSD device location pre-check\n  - 0.1.37 Add a disruptive OSD restart to the post-apply job\n  - 0.1.38 Skip pod wait in post-apply job when disruptive\n  - 0.1.39 Allow for unconditional OSD restart\n  - 0.1.40 Remove udev interactions from osd-init\n  - 0.1.41 Remove ceph-mon dependency in ceph-osd liveness probe\n  - 0.1.42 Added OCI registry authentication\n  - 0.1.43 Update all Ceph images to Focal\n  - 0.1.44 Update Ceph to 17.2.6\n  - 0.1.45 Extend the ceph-osd post-apply job PG wait\n  - 0.1.46 Use Helm toolkit functions for Ceph probes\n  - 0.1.47 Add disk zap to OSD init forced repair case\n  - 0.1.48 Update Rook to 1.12.5 and Ceph to 18.2.0\n  - 0.1.49 Update Ceph images to Jammy and Reef 18.2.1\n  - 0.1.50 Allow lvcreate to wipe existing LV metadata\n  - 0.1.51 Update Ceph images to patched 18.2.2 and restore debian-reef repo\n  - 0.1.52 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.53 Update ceph-daemon to be able to use tini init system\n  - 0.1.54 Remove use of tini for ceph-daemon\n  - 0.1.55 Update ceph-osd pod containers to make sure OSD pods are properly terminated at restart\n  - 0.1.56 Add preStop lifecycle script to log-runner\n  - 0.1.57 Added code to kill another background process in log-runner at restart\n  - 0.1.58 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-provisioners-091a682dc01c219f.yaml",
    "content": "---\nceph-provisioners:\n  - |\n    Update Ceph to Tentacle 20.2.0 and replaced image sources from docker.io/openstackhelm with quay.io/airshipit\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-provisioners-c4334743e1cadc04.yaml",
    "content": "---\nceph-provisioners:\n  - |\n    Update Ceph to Tentacle 20.2.1\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-provisioners.yaml",
    "content": "---\nceph-provisioners:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Validate each storageclass created\n  - 0.1.3 Uplift from Nautilus to Octopus release\n  - 0.1.4 Add Ceph CSI plugin\n  - 0.1.5 Fix Helm tests for the Ceph provisioners\n  - 0.1.6 Update ceph_mon config as per new ceph clients\n  - 0.1.7 Use full image ref for docker official images\n  - 0.1.8 Enable Ceph CSI Provisioner to Stand Alone\n  - 0.1.10 Add check for empty ceph endpoint\n  - 0.1.11 Limit Ceph Provisioner Container Security Contexts\n  - 0.1.12 Add ceph mon v2 port for ceph csi provisioner\n  - 0.1.13 Fix ceph-provisioner rbd-healer error\n  - 0.1.14 Helm 3 - Fix Job labels\n  - 0.1.15 Add support to connect to rook-ceph cluster\n  - 0.1.16 Update htk requirements\n  - 0.1.17 Consolidate mon_endpoints discovery\n  - 0.1.18 Update CSI images & fix ceph csi provisioner RBAC\n  - 0.1.19 Add pods watch and list permissions to cluster role\n  - 0.1.20 Add missing CRDs for volume snapshots (classes, contents)\n  - 0.1.21 Added OCI registry authentication\n  - 0.1.22 Remove legacy Ceph provisioners\n  - 0.1.23 Remove unnecessary templates\n  - 0.1.24 Update all Ceph images to Focal\n  - 0.1.25 Update kubernetes registry to registry.k8s.io\n  - 0.1.26 Update Ceph to 17.2.6\n  - 0.1.27 Update Rook to 1.12.5 and Ceph to 18.2.0\n  - 0.1.28 Update Ceph images to Jammy and Reef 18.2.1\n  - 0.1.29 Update Ceph images to patched 18.2.2 and restore debian-reef repo\n  - 0.1.30 Specify CSI drivername in values.yaml\n  - 0.1.31 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.32 Update ceph_rbd_provisioner image to 18.2.2\n  - 0.1.33 Remove dependencies on legacy provisioners\n  - 0.1.34 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-rgw-1dc7fd498ff7ed46.yaml",
    "content": "---\nceph-rgw:\n  - |\n    Update Ceph to Tentacle 20.2.1\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-rgw-9d99622a011584b0.yaml",
    "content": "---\nceph-rgw:\n  - |\n    Update Ceph to Tentacle 20.2.0 and replaced image sources from docker.io/openstackhelm with quay.io/airshipit\n...\n"
  },
  {
    "path": "releasenotes/notes/ceph-rgw.yaml",
    "content": "---\nceph-rgw:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Uplift from Nautilus to Octopus release\n  - 0.1.3 update rbac api version\n  - 0.1.4 Rgw placement target support\n  - 0.1.5 Add tls support\n  - 0.1.6 Update tls override options\n  - 0.1.7 Use ca cert for helm tests\n  - 0.1.8 Add placement target delete support to RGW\n  - 0.1.9 Use full image ref for docker official images\n  - 0.1.10 Fix a bug in placement target deletion for new targets\n  - 0.1.11 Change s3 auth order to use local before external\n  - 0.1.12 Export crash dumps when Ceph daemons crash\n  - 0.1.13 Add configmap hash for keystone rgw\n  - 0.1.14 Disable crash dumps for rgw\n  - 0.1.15 Correct rgw placement target functions\n  - 0.1.16 Helm 3 - Fix Job labels\n  - 0.1.17 Update htk requirements\n  - 0.1.18 Consolidate mon_endpoints discovery\n  - 0.1.19 Add ClusterRole to the bootstrap-job\n  - 0.1.20 Enable taint toleration for Openstack services jobs\n  - 0.1.21 Correct mon discovery for multiple RGWs in different NS\n  - 0.1.22 Update default image values\n  - 0.1.23 Added OCI registry authentication\n  - 0.1.24 Replace civetweb with beast for unencrypted connections\n  - 0.1.25 Update all Ceph images to Focal\n  - 0.1.26 Replace node-role.kubernetes.io/master with control-plane\n  - 0.1.27 Update Ceph to 17.2.6\n  - 0.1.28 Use Helm toolkit functions for Ceph probes\n  - 0.1.29 Add 2023.1 Ubuntu Focal overrides\n  - 0.1.30 Update Rook to 1.12.5 and Ceph to 18.2.0\n  - 0.1.31 Add a ceph-rgw-pool job to manage RGW pools\n  - 0.1.32 Multiple namespace support for the ceph-rgw-pool job\n  - 0.1.33 Update Ceph images to Jammy and Reef 18.2.1\n  - 0.1.34 Update Ceph images to patched 18.2.2 and restore debian-reef repo\n  - 0.1.35 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.36 Add 2024.1 Ubuntu Jammy overrides\n  - 0.1.37 Update heat image default tag to 2024.1-ubuntu_jammy\n  
- 0.1.38 Add 2024.2 overrides\n  - 0.1.39 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/cert-rotation-06fbf166bc55e372.yaml",
    "content": "---\ncert-rotation:\n  - |\n    Update Ceph to Tentacle 20.2.0 and replaced image sources from docker.io/openstackhelm with quay.io/airshipit\n...\n"
  },
  {
    "path": "releasenotes/notes/cert-rotation.yaml",
    "content": "---\ncert-rotation:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Return true if grep finds no match\n  - 0.1.2 Correct and enhance the rotation script\n  - 0.1.3 Update htk requirements\n  - 0.1.4 Consider initContainers when restarting resources\n  - 0.1.5 Migrated CronJob resource to batch/v1 API version\n  - 0.1.6 Added OCI registry authentication\n  - 0.1.7 Update all Ceph images to Focal\n  - 0.1.8 Update Ceph images to Jammy and Reef 18.2.1\n  - 0.1.9 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.10 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/change-default-ovs-image-c1e24787f1b03170.yaml",
    "content": "---\nother:\n  - |\n    The default image used by the openvswitch chart has been changed from\n    a Debian based image including a source build of openvswitch v2.8.1 to an\n    Ubuntu Bionic based image including a distribution provided build of\n    openvswitch v2.9.2.\n...\n"
  },
  {
    "path": "releasenotes/notes/change-memcache-backend-2d85a3c75b32db39.yaml",
    "content": "---\nother:\n  - |\n    memcache backend for nova has been changed from oslo_cache.memcache_pool\n    to dogpile.cache.memcached. You can revert to previous behaviour by\n    setting conf.nova.cache.backend to \"oslo_cache.memcache_pool\".\n...\n"
  },
  {
    "path": "releasenotes/notes/changed-ovs-dpdk-root-key-f8aaf3ad65189c8a.yaml",
    "content": "---\nother:\n  - |\n    The root configuration key of the DPDK section has been changed from\n    \"dpdk\" to \"ovs_dpdk\" to achieve parity with the corresponding configuration\n    key in the Neutron chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/cinder-1db248fbc00e56ff.yaml",
    "content": "---\ncinder:\n  - |\n    Add missing default glance identity endpoint for cinder.\n...\n"
  },
  {
    "path": "releasenotes/notes/cinder-32aac095ffc09912.yaml",
    "content": "---\ncinder:\n  - |\n    Add support for etcSources to db-sync job.\n...\n"
  },
  {
    "path": "releasenotes/notes/cinder-48232b427a294d57.yaml",
    "content": "---\ncinder:\n  - |\n    Enable passing custom_job_annotations via values for specific Cinder jobs.\n    This allows controlling Argo CD / Helm job behavior (e.g., sync-waves,\n    hook policies) without modifying the chart templates directly.\n...\n"
  },
  {
    "path": "releasenotes/notes/cinder-4e17dd8ee84ca1a2.yaml",
    "content": "---\ncinder:\n  - Fix ingress resource generation\n...\n"
  },
  {
    "path": "releasenotes/notes/cinder-8f8fd56d2c9a5d75.yaml",
    "content": "---\ncinder:\n  - |\n    Add cronjob to purge old deleted database entries\n...\n"
  },
  {
    "path": "releasenotes/notes/cinder-92ee9aa061442690.yaml",
    "content": "---\ncinder:\n  - |\n    Fix cinder volume container permission to fix FailedToDropPrivileges issue.\n...\n"
  },
  {
    "path": "releasenotes/notes/cinder-a25114bef0ed2f56.yaml",
    "content": "---\nupgrade:\n  - |\n    Change the default volume v3 path to not include the tenant_id. This is the\n    current recommended approach and has not been necessary since the Yoga release.\n...\n"
  },
  {
    "path": "releasenotes/notes/cinder-a530fe90112c74d1.yaml",
    "content": "---\ncinder:\n  - |\n    Unhardcode readiness/liveness probe parameters for cinder-api\n...\n"
  },
  {
    "path": "releasenotes/notes/cinder-aca94f2247bcddcd.yaml",
    "content": "---\ncinder:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/cinder/cinder.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/cinder-b605e2bc57b6d49f.yaml",
    "content": "---\ncinder:\n  - |\n    Update Ceph to Tentacle 20.2.0 and replaced image sources from docker.io/openstackhelm with quay.io/airshipit\n...\n"
  },
  {
    "path": "releasenotes/notes/cinder-ddd3bb79dff72ba6.yaml",
    "content": "---\ncinder:\n  - |\n    Add missing priority class and runtime class definition for cinder_db_purge\n...\n"
  },
  {
    "path": "releasenotes/notes/cinder-ded5ec20ef58ac93.yaml",
    "content": "---\ncinder:\n  - |\n    Split out the OpenStack service account definitions from cinder.conf and into\n    config snippets which are loaded at /etc/cinder/cinder.d/, which is automatically\n    loaded by OSLO when loading the main cinder.conf. This makes it easier for users\n    to use the regular config generation while supplying credentials out of band.\n...\n"
  },
  {
    "path": "releasenotes/notes/cinder-f177532ecd78dcec.yaml",
    "content": "---\nfixes:\n  - |\n    Some backends of cinder will write some temp data into the state_path\n    so it should be something available to be written to for the pod.\n...\n"
  },
  {
    "path": "releasenotes/notes/cinder.yaml",
    "content": "---\ncinder:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Support service tokens to prevent long-running job failures\n  - 0.1.3 Support of external ceph backend\n  - 0.1.4 Enable iscsi to work correctly in cinder volume\n  - 0.1.5 Resolves mount issue with termination-log\n  - 0.1.6 Enable volume backup for iSCSI based volumes\n  - 0.1.7 Change Issuer to ClusterIssuer\n  - 0.1.8 Revert - Change Issuer to ClusterIssuer\n  - 0.1.9 Use HostToContainer mount propagation\n  - 0.1.10 Change Issuer to ClusterIssuer\n  - 0.1.11 Update RBAC apiVersion from /v1beta1 to /v1\n  - 0.1.12 Update volume type creation bootstrap logic\n  - 0.1.13 Add NFS cinder backup override\n  - 0.1.14 Add Multipathd support for ISCSI backed volumes\n  - 0.1.15 Fix the problem in hostNetwork mode\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Fix the ceph pool creations for openstack services\n  - 0.2.2 Adding rabbitmq TLS logic\n  - 0.2.3 Mount rabbitmq TLS secret\n  - 0.2.4 Add Ussuri release support\n  - 0.2.5 Add volume QoS support\n  - 0.2.6 Added helm.sh/hook with value of post-install and post-upgrade\n  - 0.2.7 Add Victoria and Wallaby releases support\n  - 0.2.8 Add logic to bootstrap to handle upgrade timing issue\n  - 0.2.9 Mount rabbitmq TLS secret for audit usage cronjob\n  - 0.2.10 Helm 3 - Fix Job Labels\n  - 0.2.11 Update htk requirements repo\n  - 0.2.12 Remove cinder v1/v2 defaults\n  - 0.2.13 Upgrade default images to ussuri\n  - 0.2.14 Fix notifications\n  - 0.2.15 Remove glance registry\n  - 0.2.16 Enable taint toleration for Openstack services\n  - 0.2.17 Remove unsupported values overrides\n  - 0.2.18 Add helm hook in bootstrap job\n  - 0.2.19 Add volume types visibility (public/private)\n  - 0.2.20 Allow cinder v1/v2 endpoint creation if needed\n  - 0.2.21 Migrated CronJob resource to batch/v1 API version & PodDisruptionBudget to policy/v1\n  - 0.2.22 Add Xena and Yoga values overrides\n 
 - 0.2.23 Added OCI registry authentication\n  - 0.2.24 Fix conditional check for cinder.utils.has_ceph_backend template\n  - 0.2.25 Remove volumes unrelated with ceph backend from conditional volume list in cinder-volume deployment\n  - 0.2.26 Distinguish between port number of internal endpoint and binding port number\n  - 0.2.27 Support TLS endpoints\n  - 0.2.28 Use HTTP probe instead of TCP probe\n  - 0.2.29 Add SYS_ADMIN capability in cinder-volume\n  - 0.2.30 Specify a existing configmap name for external ceph configuration\n  - 0.2.31 Remove fixed node name from default values and add service cleaner cronjob\n  - 0.2.32 Revert \"Remove fixed node name from default values and add service cleaner cronjob\"\n  - 0.3.0 Remove support for Train and Ussuri\n  - 0.3.1 Change ceph-config-helper image tag\n  - 0.3.2 Remove default policy rules\n  - 0.3.3 Fix for creation endpoins and services when v1/v2 are disabled\n  - 0.3.4 Fix Helm hooks for storage bootstrap jobs\n  - 0.3.5 Add Nova endpoint details to support online volume resize\n  - 0.3.6 Fix ceph keyring placement for uppercased backends\n  - 0.3.7 Allow Ceph pools to use 1x replication\n  - 0.3.8 Update all Ceph images to Focal\n  - 0.3.9 Replace node-role.kubernetes.io/master with control-plane\n  - 0.3.10 Define service_type in keystone_authtoken to support application credentials with access rules\n  - 0.3.11 Add Zed overrides\n  - 0.3.12 Add 2023.1 overrides\n  - 0.3.13 Use service tokens\n  - 0.3.14 Add Ubuntu Jammy overrides\n  - 0.3.15 Add 2023.2 Ubuntu Jammy overrides\n  - 0.3.16 Update Ceph images to Jammy and Reef 18.2.1\n  - 0.3.17 Use uWSGI for API service\n  - 0.3.18 Enable custom annotations for Openstack pods\n  - 0.3.19 Add 2024.1 overrides\n  - 0.3.20 Add readiness probe initial delay\n  - 0.3.21 Enable custom annotations for Openstack secrets\n  - 0.3.22 Update images used by default\n  - 0.3.23 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.3.24 Fix 
volume type create to allow encrypt volume type\n  - 0.3.25 Add 2024.2 Ubuntu Jammy overrides\n  - 0.3.26 Mount /run/cryptsetup in cinder-volume container\n  - 0.3.27 Add support for using a tmpfs for cinder image conversion\n  - 0.3.28 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/cloudkitty-a95de06fbfeac965.yaml",
    "content": "---\ncloudkitty:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/cloudkitty/cloudkitty.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/cloudkitty-d61bea096f10b731.yaml",
    "content": "---\ncloudkitty:\n  - |\n    Add support for the Cloudkitty rating service to define how resource usage\n    (compute, storage, network, etc) should be priced, then generate\n    cost reports based on usage data collected from various sources.\n  - Removed unnecessary code in pod spec, added metrics.yml to cloudkitty-api container\nfeatures:\n  - |\n    Added Cloudkitty Helm chart with support for\n    - Cloudkitty API and Processor deployment and config.\n    - Customize cloudkitty metrics scraping from gnocchi/ceilometer and prometheus.\n    - Provides APIs and a Horizon (dashboard) plugin to generate reports for projects/tenants.\nissues:\n  - |\n    Currently, there is no published support for Skyline dashboard integration\n    Requires gnocchi to be installed and running within the local cluster, or cloudkitty processor\n    pods will throw errors.\n...\n"
  },
  {
    "path": "releasenotes/notes/cloudkitty.yaml",
    "content": "---\ncloudkitty:\n  - 0.1.0 Initial Chart\n...\n"
  },
  {
    "path": "releasenotes/notes/common-695408be564c5d44.yaml",
    "content": "---\nfeatures:\n  - |\n    Update default ingress classes and annotations for charts to make them implementation\n    agnostic. They used to be nginx specific because we always used ingress-nginx as the most\n    common choice. Ingress-nginx is deprecated and will become unmaintained in Feb/2026.\n\n    Now for all test jobs we use HAProxy Ingress as the default implementation. However any\n    other implementation can be used as long as it supports annotations similar to\n    'nginx.ingress.kubernetes.io/rewrite-target' or 'haproxy.org/path-rewrite' which are\n    used in many OpenStack-Helm charts.\n...\n"
  },
  {
    "path": "releasenotes/notes/common-76e452ae14eb3707.yaml",
    "content": "---\nfeatures:\n  - |\n    Update apparmor values to use security_context instead of annotations.\n...\n"
  },
  {
    "path": "releasenotes/notes/common-8730c7058550f934.yaml",
    "content": "---\nfeatures:\n  - |\n    Added an extraObjects list to every chart to allow for adding extra\n    Kubernetes objects along side the chart without modifying the chart.\n    It has the added benefit of using Helm's built in templating so\n    you are able to utilize other values supplied to the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/common-d6d7b93fcc7296e9.yaml",
    "content": "---\nfeatures:\n  - |\n    Added emptyDir mounted at /var/lock and set OSLO_LOCK_PATH env var to\n    this path for oslo.concurrency support in all OpenStack services.\n...\n"
  },
  {
    "path": "releasenotes/notes/common-eb7338a63d83ad95.yaml",
    "content": "---\nupgrade:\n  - |\n    Support for Helm 2 has been removed by removal of the helm3_hook value across all charts.\n    It is no longer possible to set helm3_hook to False to support Helm 2.\n...\n"
  },
  {
    "path": "releasenotes/notes/common-f19dec4799b18756.yaml",
    "content": "---\nfeatures:\n  - |\n    Add support for runtimeClassName and priorityClassName\n...\n"
  },
  {
    "path": "releasenotes/notes/cyborg.yaml",
    "content": "---\ncyborg:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Migrated PodDisruptionBudget resource to policy/v1 API version\n  - 0.1.2 Added OCI registry authentication\n  - 0.1.3 Define service_type in keystone_authtoken to support application credentials with access rules\n  - 0.1.4 Enable custom annotations for Openstack pods\n  - 0.1.5 Enable custom annotations for Openstack secrets\n  - 0.1.6 Update images used by default\n  - 0.1.7 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.8 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/designate-9ed4257ab657b224.yaml",
    "content": "---\ndesignate:\n  - |\n    Add a periodic job to clean Designate services which do not report\n    their heartbeat within two heartbeat interval cycles.\n...\n"
  },
  {
    "path": "releasenotes/notes/designate-bc18055009645160.yaml",
    "content": "---\ndesignate:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/designate/designate.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/designate.yaml",
    "content": "---\ndesignate:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Added post-install and post-upgrade helm hooks on Jobs\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Use policies in yaml format\n  - 0.2.2 Update htk requirements repo\n  - 0.2.3 Fix extra volume mounts\n  - 0.2.4 Update default image values to Wallaby\n  - 0.2.5 Migrated PodDisruptionBudget resource to policy/v1 API version\n  - 0.2.6 Added OCI registry authentication\n  - 0.2.7 Use HTTP probe instead of TCP probe\n  - 0.2.8 Remove default policy rules\n  - 0.2.9 Define service_type in keystone_authtoken to support application credentials with access rules\n  - 0.2.10 Uses uWSGI for API service\n  - 0.2.11 Enable custom annotations for Openstack pods\n  - 0.2.12 Enable custom annotations for Openstack secrets\n  - 0.2.13 Update images used by default\n  - 0.2.14 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.2.15 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/elastic-apm-server.yaml",
    "content": "---\nelastic-apm-server:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Use full image ref for docker official images\n  - 0.1.3 Update htk requirements\n  - 0.1.4 Added OCI registry authentication\n  - 0.1.5 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.6 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/elastic-filebeat.yaml",
    "content": "---\nelastic-filebeat:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Use full image ref for docker official images\n  - 0.1.3 Update htk requirements\n  - 0.1.4 Added OCI registry authentication\n  - 0.1.5 Replace node-role.kubernetes.io/master with control-plane\n  - 0.1.6 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.7 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/elastic-metricbeat.yaml",
    "content": "---\nelastic-metricbeat:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Update RBAC apiVersion from /v1beta1 to /v1\n  - 0.1.3 Use full image ref for docker official images\n  - 0.1.4 Update htk requirements\n  - 0.1.5 Added OCI registry authentication\n  - 0.1.6 Replace node-role.kubernetes.io/master with control-plane\n  - 0.1.7 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.8 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/elastic-packetbeat.yaml",
    "content": "---\nelastic-packetbeat:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Use full image ref for docker official images\n  - 0.1.3 Update htk requirements\n  - 0.1.4 Added OCI registry authentication\n  - 0.1.5 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.6 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/elasticseach-625bc83028513f08.yaml",
    "content": "---\nelasticsearch:\n  - |\n    Properly configure RBAC for create-elasticsearch-templates\n    and verify-repositories service accounts. This ensures they have\n    the necessary permissions to access ObjectBucket cluster resources.\n...\n"
  },
  {
    "path": "releasenotes/notes/elasticsearch-127e34013b70451d.yaml",
    "content": "---\nelasticsearch:\n  - Upgrade to the latest v8.18.1\n  - Replace the elasticsearch_templates image with a\n    lightweight upstream image that includes yq and jq\n  - Switch to the upstream image, as S3 repository\n    support is natively integrated starting from v8.x\n    (https://www.elastic.co/guide/en/elasticsearch/plugins/8.0/repository-s3.html)\n    and elasticsearch_templates is replaced to use jq\n...\n"
  },
  {
    "path": "releasenotes/notes/elasticsearch-1fb9cb9d0b6169a7.yaml",
    "content": "---\nelasticsearch:\n  - |\n    Update Ceph to Tentacle 20.2.0 and replaced image sources from docker.io/openstackhelm with quay.io/airshipit\n...\n"
  },
  {
    "path": "releasenotes/notes/elasticsearch-4a005ef3cec5f170.yaml",
    "content": "---\nelasticsearch:\n  - |\n    Update Ceph to Tentacle 20.2.1\n...\n"
  },
  {
    "path": "releasenotes/notes/elasticsearch-653d4b77cf26c277.yaml",
    "content": "---\nelasticsearch:\n  - Upgrade the Prometheus elasticsearch-exporter to the latest v1.9.0\n  - Rename the slm flag according to the changelog\n    https://github.com/prometheus-community/elasticsearch_exporter/releases/tag/v1.9.0\n...\n"
  },
  {
    "path": "releasenotes/notes/elasticsearch-ba314935c85c3b25.yaml",
    "content": "---\nelasticsearch:\n  - |\n    Elasticsearch job responsible for creation of s3 user and bucket\n    required a secret radosgw-s3-admin-creds to be created,\n    but its data wasn't used. Getting rid of this.\n...\n"
  },
  {
    "path": "releasenotes/notes/elasticsearch-baf978b047efc111.yaml",
    "content": "---\nelasticsearch:\n  - Upgrade to v8.19.9\n  - Use absolute paths for log files due to upstream changes\n    (https://github.com/elastic/elasticsearch/commit/6876b8cf05e8a5ae416b5c7394a9c887c8976cf1)\n...\n"
  },
  {
    "path": "releasenotes/notes/elasticsearch.yaml",
    "content": "---\nelasticsearch:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Update to 7.6.2 image\n  - 0.1.3 Add elasticsearch snapshot policy template for SLM\n  - 0.1.4 Add elasticsearch ILM functionality\n  - 0.1.5 Make templates job more generic\n  - 0.1.6 Fix elasticsearch-master rendering error\n  - 0.1.7 Pin Java options to specific versions\n  - 0.1.8 Disable Curator in Gate & Chart Defaults\n  - 0.2.0 Add more S3 configuration options\n  - 0.2.1 Make templates job more robust & allow overrides\n  - 0.2.2 Update the ES curator config to {}\n  - 0.2.3 Add configurable backoffLimit to templates job\n  - 0.2.4 Update helm-test script\n  - 0.2.5 Enable TLS with Kibana\n  - 0.2.6 Enable TLS path between nodes in cluster and TLS path between ceph-rgw\n  - 0.2.7 Get connection option from values.yaml\n  - 0.2.8 Use full image ref for docker official images\n  - 0.2.9 Removed repo verification check from helm-test\n  - 0.2.10 Enable TLS path between Prometheus-elasticsearch-exporter and Elasticsearch\n  - 0.2.11 Enable TLS path between Curator and Elasticsearch\n  - 0.2.12 Helm 3 - Fix Job labels\n  - 0.2.13 Update htk requirements\n  - 0.2.14 Fix cronjob rendering\n  - 0.2.15 Fix elasticsearch-data shutdown\n  - 0.2.16 Use python3 for helm tests when possible\n  - 0.2.17 Annotate ES master/data sts with S3 secret hash\n  - 0.2.18 Update default image value to Wallaby\n  - 0.2.19 Migrated CronJob resource to batch/v1 API version\n  - 0.2.20 Set default python for helm test\n  - 0.2.21 Added OCI registry authentication\n  - 0.2.22 Update all Ceph images to Focal\n  - 0.2.23 Add configurable liveness probe for elasticsearch client\n  - 0.2.24 Update Ceph to 17.2.6\n  - 0.2.25 Update ElasticSearch to 8.9.0\n  - 0.2.26 Add 2023.1 Ubuntu Focal overrides\n  - 0.2.27 Update Rook to 1.12.5 and Ceph to 18.2.0\n  - 0.2.28 Utilize bucket claim CRD when using with Rook\n  - 0.2.29 Make es curator path configurable\n 
 - 0.2.30 Update curator for es v8\n  - 0.3.0 Update elasticsearch_exporter to v1.7.0\n  - 0.3.1 Update Ceph images to Jammy and Reef 18.2.1\n  - 0.3.2 Update Ceph images to patched 18.2.2 and restore debian-reef repo\n  - 0.3.3 Update es curator to 8.0.10\n  - 0.3.4 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.3.5 Remove gateway node role\n  - 0.3.6 Add 2024.1 Ubuntu Jammy overrides\n  - 0.3.7 Add 2024.2 overrides\n  - 0.3.8 Remove use of python in helm tests\n  - 0.3.9 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/etcd.yaml",
    "content": "---\netcd:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Update to container image repo k8s.gcr.io\n  - 0.1.3 Use full image ref for docker official images\n  - 0.1.4 Update htk requirements\n  - 0.1.5 Added OCI registry authentication\n  - 0.1.6 Update kubernetes registry to registry.k8s.io\n  - 0.1.7 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.8 Switch etcd to statefulset\n  - 0.1.9 Adding cronjob with etcd compaction\n  - 0.1.10 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/fluentbit.yaml",
    "content": "---\nfluentbit:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Use full image ref for docker official images\n  - 0.1.3 Update htk requirements\n  - 0.1.4 Added OCI registry authentication\n  - 0.1.5 Replace node-role.kubernetes.io/master with control-plane\n  - 0.1.6 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.7 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/fluentd.yaml",
    "content": "---\nfluentd:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Add Configurable Readiness and Liveness Probes\n  - 0.1.3 Enable TLS path for output to Elasticsearch\n  - 0.1.4 Use full image ref for docker official images\n  - 0.1.5 Kafka brokers defined as a list with port \"kafka1:9092,kafka2:9020,kafka3:9092\"\n  - 0.1.6 Update htk requirements\n  - 0.1.7 Update default image values to Wallaby\n  - 0.1.8 Added OCI registry authentication\n  - 0.1.9 Set sticky bit for tmp\n  - 0.1.10 Add 2023.1 Ubuntu Focal overrides\n  - 0.1.11 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.12 Add 2024.1 Ubuntu Jammy overrides\n  - 0.1.13 Add 2024.2 overrides\n  - 0.1.14 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/freezer-3272cc6ed891f5a3.yaml",
    "content": "---\nfreezer: >\n    Added support for Freezer disaster recovery and backup-as-a-service component for OpenStack. It provides a way to back up various resources, such as virtual machine instances, databases, and file systems. It will allow users to schedule backups, restore data, and manage the lifecycle of their backups to ensure data protection and business continuity within an OpenStack cloud.\nfeatures:\n  - |\n    Introduced Freezer Helm chart with support for:\n    - Freezer API, Scheduler, Agent service deployment and configuration\n    - Integration with existing OpenStack identity services\n    - Support for custom Freezer configuration through values.yaml\n...\n"
  },
  {
    "path": "releasenotes/notes/glance-1245a71c1694b79c.yaml",
    "content": "---\nglance:\n  - |\n    Add support for etcSources to db-sync job.\n...\n"
  },
  {
    "path": "releasenotes/notes/glance-79dad0da1e27df42.yaml",
    "content": "---\nglance:\n  - |\n    Add support for etcSources to glance-metadefs-load job.\n...\n"
  },
  {
    "path": "releasenotes/notes/glance-9043d8c0a8119256.yaml",
    "content": "---\nglance:\n  - |\n    Update Ceph to Tentacle 20.2.0 and replaced image sources from docker.io/openstackhelm with quay.io/airshipit\n...\n"
  },
  {
    "path": "releasenotes/notes/glance-cb814fab2bccc95e.yaml",
    "content": "---\nglance:\n  - |\n    add raise_for_status method call to the livenessProbe command to properly\n    raise an error when return code is 4xx (client error) or 5xx (server error)\n...\n"
  },
  {
    "path": "releasenotes/notes/glance-cbd61a1ae1e902b5.yaml",
    "content": "---\nglance:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/glance/glance.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/glance-e528d9f2473763a1.yaml",
    "content": "---\nfixes:\n  - |\n    Fix missing job annotation support from values.yaml on the clean, metadefs-load,\n    and storage-init jobs.\n...\n"
  },
  {
    "path": "releasenotes/notes/glance.yaml",
    "content": "---\nglance:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency to >= 0.1.0\n  - 0.1.2 Change issuer to clusterissuer\n  - 0.1.3 Revert - Change issuer to clusterissuer\n  - 0.1.4 Update RBAC apiVersion from /v1beta1 to /v1\n  - 0.1.5 Change Issuer to ClusterIssuer\n  - 0.1.6 Update glance default policy values\n  - 0.1.7 Update storage init script with cacert\n  - 0.1.8 Update glance default policy values\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Fix the ceph pool creations for openstack services\n  - 0.2.2 Adding rabbitmq TLS logic\n  - 0.2.3 Use policies in yaml format\n  - 0.2.4 Mount rabbitmq TLS secret\n  - 0.2.5 Add Ussuri release support\n  - 0.2.6 Add Victoria and Wallaby releases support\n  - 0.2.7 Added helm.sh/hook for the jobs\n  - 0.2.8 Helm 3 - Fix Job Labels\n  - 0.2.9 Helm 3 - Fix More Job Labels\n  - 0.2.10 Update htk requirements repo\n  - 0.3.0 Remove glance registry\n  - 0.3.1 Enable taint toleration for Openstack services\n  - 0.3.2 Decrease terminationGracePeriodSeconds on glance-api\n  - 0.3.3 Update naming for subchart compatibility\n  - 0.3.4 Change image default version to wallaby\n  - 0.3.5 Migrated PodDisruptionBudget resource to policy/v1 API version\n  - 0.3.6 Add Xena and Yoga values overrides\n  - 0.3.7 Fix glance-etc template changing due to comment and whitespace between install and first upgrade\n  - 0.3.8 Added OCI registry authentication\n  - 0.3.9 Support TLS endpoints\n  - 0.3.10 Distinguish between port number of internal endpoint and binding port number\n  - 0.3.11 Use HTTP probe instead of TCP probe\n  - 0.3.12 Add support for using Cinder as backend\n  - 0.4.0 Remove support for Train and Ussuri\n  - 0.4.1 Remove default policy rules\n  - 0.4.2 Allow Ceph pools to use 1x replication\n  - 0.4.3 Update all Ceph images to Focal\n  - 0.4.4 Replace node-role.kubernetes.io/master with control-plane\n  - 0.4.5 Fix wrong configFile path in glance bootstrap container.\n  - 0.4.6 
Define service_type in keystone_authtoken to support application credentials with access rules\n  - 0.4.7 Add Zed overrides\n  - 0.4.8 Add 2023.1 overrides\n  - 0.4.9 Use service tokens\n  - 0.4.10 Add exec probe timeouts\n  - 0.4.11 Bring liveness/readiness params out to values.yaml\n  - 0.4.12 Add flag `keep_pvc` to allows set helm resource-policy for glance-images PVC to keep.\n  - 0.4.13 Add Ubuntu Jammy overrides\n  - 0.4.14 Bump Cirros version to 0.6.2\n  - 0.4.15 Add 2023.2 Ubuntu Jammy overrides\n  - 0.4.16 Use --region option to prevent OS_SWIFT_ENDPOINT_PREFIX is broken in storage-init.sh\n  - 0.4.17 Update Ceph images to Jammy and Reef 18.2.1\n  - 0.4.18 Enable custom annotations for Openstack pods\n  - 0.4.19 Add 2024.1 overrides\n  - 0.4.20 Add readiness probe initial delay\n  - 0.4.21 Use uWSGI\n  - 0.4.22 Enable custom annotations for Openstack secrets\n  - 0.4.23 Update images used by default\n  - 0.4.24 Do not attach backend pvc to storage init pod\n  - 0.4.25 Allow customisation of pvc storage accessMode so we can run multiple api pods\n  - 0.4.26 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.5.0 Remove deprecated config options `stores` and `default_store`\n  - 0.5.1 Add 2024.2 Ubuntu Jammy overrides\n  - 0.5.2 Fix HTTP healthcheck URLs for Kubernetes probes\n  - 0.5.3 Add override for downloading Ubuntu image\n  - 0.5.4 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/gnocchi-37ba93d527c7ba75.yaml",
    "content": "---\ngnocchi:\n  - |\n    Update Ceph to Tentacle 20.2.0 and replaced image sources from docker.io/openstackhelm with quay.io/airshipit\n...\n"
  },
  {
    "path": "releasenotes/notes/gnocchi-71bec40a3416cb8a.yaml",
    "content": "---\ngnocchi:\n  - |\n    Update Ceph to Tentacle 20.2.1\n...\n"
  },
  {
    "path": "releasenotes/notes/gnocchi.yaml",
    "content": "---\ngnocchi:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Use full image ref for docker official images\n  - 0.1.3 Helm 3 - Fix Job labels\n  - 0.1.4 Update htk requirements\n  - 0.1.5 Enable taint toleration for Openstack services jobs\n  - 0.1.6 Update all Ceph images to Focal\n  - 0.1.7 Replace node-role.kubernetes.io/master with control-plane\n  - 0.1.8 Migrated pdb resource to policy/v1 API version\n  - 0.1.9 Migrated CronJob resource to batch/v1 API version\n  - 0.1.10 Update Ceph to 17.2.6\n  - 0.1.11 Update Rook to 1.12.5 and Ceph to 18.2.0\n  - 0.1.12 Update Ceph images to Jammy and Reef 18.2.1\n  - 0.1.13 Bugfix Ceph user creation for RBD access\n  - 0.1.14 Update Ceph images to patched 18.2.2 and restore debian-reef repo\n  - 0.1.15 Add 2023.2 Ubuntu Jammy overrides\n  - 0.1.16 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.17 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/grafana-b3fac6a311d115a6.yaml",
    "content": "---\ngrafana:\n  - Adjust Python code for SQLAlchemy 2.0 compatibility\n...\n"
  },
  {
    "path": "releasenotes/notes/grafana-d1a2049e057fe878.yaml",
    "content": "---\ngrafana:\n  - Upgrade Grafana to the latest v12.4.2.\n  - Upgrade grafana-image-renderer to v5.7.3.\n  - Migrate deprecated parameters and add new\n    ones in Grafana configuration.\n  - Fix Grafana helm Selenium test - wait for\n    login form, not page title.\n...\n"
  },
  {
    "path": "releasenotes/notes/grafana.yaml",
    "content": "---\ngrafana:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Update Grafana version\n  - 0.1.3 Provision any dashboard as homepage\n  - 0.1.4 Enable TLS for Grafana\n  - 0.1.5 Enable TLS between Grafana and Prometheus\n  - 0.1.6 Enable TLS for Grafana ingress path\n  - 0.1.7 Update Grafana version and Selenium script\n  - 0.1.8 Use full image ref for docker official images\n  - 0.1.9 Add Alertmanager dashboard to Grafana\n  - 0.1.10 Helm 3 - Fix Job labels\n  - 0.1.11 Update htk requirements\n  - 0.1.12 Add iDRAC dashboard to Grafana\n  - 0.1.13 Update prometheus metric name\n  - 0.1.14 Add run migrator job\n  - 0.1.15 Added OCI registry authentication\n  - 0.1.16 Grafana 8.5.10 with unified alerting\n  - 0.1.17 Fix uid for the user grafana\n  - 0.1.18 Migrator job is now mariadb-fail-proof\n  - 0.1.19 Update grafana to 9.2.10\n  - 0.1.20 Upgrade osh-selenium image to latest-ubuntu_focal\n  - 0.1.21 Fix run migrator job deployment condition\n  - 0.1.22 Make selenium v4 syntax optional\n  - 0.1.23 Modified selenium test for compatibility\n  - 0.1.24 Add image rendering sidecar\n  - 0.1.25 Add value for rendering sidecar feature\n  - 0.1.26 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.27 Update default images tags. Add 2024.1-ubuntu_jammy overrides.\n  - 0.1.28 Upgrade osh-selenium image to ubuntu_jammy\n  - 0.1.29 Add 2024.2 overrides\n  - 0.1.30 Update chart helm test environment variables\n  - 0.1.31 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/heat-5e861ec1ee8e2784.yaml",
    "content": "---\nheat:\n  - |\n    Create heat and heat_trustee service users in a single job.\n    This is to align with the helm-toolkit change regarding\n    Keystone user creation job.\n...\n"
  },
  {
    "path": "releasenotes/notes/heat-7222563449ea848e.yaml",
    "content": "---\nheat:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/heat/heat.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/heat-a584fab629e1c4fc.yaml",
    "content": "---\nheat:\n  - |\n    Add support for etcSources to db-sync job.\n...\n"
  },
  {
    "path": "releasenotes/notes/heat.yaml",
    "content": "---\nheat:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Remove tls values override for clients_heat\n  - 0.1.3 Change Issuer to ClusterIssuer\n  - 0.1.4 Revert - Change Issuer to ClusterIssuer\n  - 0.1.5 Change Issuer to ClusterIssuer\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Adding rabbitmq TLS logic\n  - 0.2.2 Use policies in yaml format\n  - 0.2.3 Mount rabbitmq TLS secret\n  - 0.2.4 Add Ussuri release support\n  - 0.2.5 Add Victoria and Wallaby releases support\n  - 0.2.6 Added post-install and post-upgrade helm-hook for jobs\n  - 0.2.7 Helm 3 - Fix Job Labels\n  - 0.2.8 Update htk requirements repo\n  - 0.2.9 Enable taint toleration for Openstack services\n  - 0.2.10 Updated naming for subchart compatibility\n  - 0.2.11 Remove old releases values override in heat\n  - 0.2.12 Migrated CronJob resource to batch/v1 API version & PodDisruptionBudget to policy/v1\n  - 0.2.13 Add Xena and Yoga values overrides\n  - 0.2.14 Added OCI registry authentication\n  - 0.2.15 Distinguish between port number of internal endpoint and binding port number\n  - 0.2.16 Support TLS endpoints\n  - 0.2.17 Use HTTP probe instead of TCP probe\n  - 0.2.18 Change hook weight for bootstrap job\n  - 0.3.0 Remove support for Train and Ussuri\n  - 0.3.1 Remove default policy rules\n  - 0.3.2 Replace node-role.kubernetes.io/master with control-plane\n  - 0.3.3 Define service_type in keystone_authtoken to support application credentials with access rules\n  - 0.3.4 Add Zed overrides\n  - 0.3.5 Add 2023.1 overrides\n  - 0.3.6 Add Ubuntu Jammy overrides\n  - 0.3.7 Add 2023.2 Ubuntu Jammy overrides\n  - 0.3.8 Fixed annotation indentation for jobs\n  - 0.3.9 Uses uWSGI for API services\n  - 0.3.10 Enable custom annotations for Openstack pods\n  - 0.3.11 Add 2024.1 overrides\n  - 0.3.12 Add readiness probe initial delay\n  - 0.3.13 Enable custom annotations for Openstack secrets\n  - 0.3.14 Update images used 
by default\n  - 0.3.15 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.3.16 Add 2024.2 Ubuntu Jammy overrides\n  - 0.3.17 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/helm-toolkit-04996581655d9952.yaml",
    "content": "---\nhelm-toolkit:\n  - |\n    The 'service' domain and project are not scoped to regions in most multi-region\n    installations and will instead be shared so it does not make sense to include\n    the region name in the description for those.\n...\n"
  },
  {
    "path": "releasenotes/notes/helm-toolkit-1ac16e62f779d907.yaml",
    "content": "---\nhelm-toolkit:\n  - |\n    Add support for etcSources to db-sync job.\n...\n"
  },
  {
    "path": "releasenotes/notes/helm-toolkit-49593d58783c3a97.yaml",
    "content": "---\nhelm-toolkit:\n  - |\n    Add priorityClassName and runtimeClassName snippets\n...\n"
  },
  {
    "path": "releasenotes/notes/helm-toolkit-5fa68b35be3378b3.yaml",
    "content": "---\nhelm-toolkit:\n  - |\n    Removing non-used script which allows to create bucket using admin user.\n    Actually any user can do this and there is better script which is\n    utilized by elasticsearch chart:\n    elasticsearch/templates/bin/_create_s3_buckets.sh.tpl\n    The only requirement is - to create the user.\n    Also, removing S3_ADMIN_<> env vars from job manifests\n    (see helm-toolkit.snippets.rgw_s3_admin_env_vars)\n    because those vars are not used by actual scripts.\n    We now use ceph.conf and keyring to create a user.\n    ceph.conf and keyring can be provisioned by either\n    ceph chart or ceph-adapter-rook chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/helm-toolkit-81cf091a301877ff.yaml",
    "content": "---\nfixes:\n  - |\n    Added new snippet to include failover OpenRC environment variables in backup cron jobs for\n    MariaDB and PostgreSQL charts.\n...\n"
  },
  {
    "path": "releasenotes/notes/helm-toolkit-9618f6c4379c13bc.yaml",
    "content": "---\nhelm-toolkit:\n  - |\n    If the specific job that is static dependency has its manifest disabled\n    the user needs to have a long diff to replace out just that one job from\n    being depended on. This makes it automatically disable waiting on those\n    jobs if they've been disabled to make the dependency management easier\n    for users.\n...\n"
  },
  {
    "path": "releasenotes/notes/helm-toolkit-a2810391532bd64a.yaml",
    "content": "---\nhelm-toolkit:\n  - |\n    Modify job_ks_user template to be able to create multiple Keystone users\n...\n"
  },
  {
    "path": "releasenotes/notes/helm-toolkit-acb954baa2fe7b2f.yaml",
    "content": "---\nfeatures:\n  - |\n    Add an ability to add more path into ingress rules, and add more ports into service.\n...\n"
  },
  {
    "path": "releasenotes/notes/helm-toolkit-e84e695df114929d.yaml",
    "content": "---\nhelm-toolkit:\n  - |\n    Corrected helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount to\n    cover the case when service account must have access to endpoint in\n    different namespace. In that case Role/Binding are getting created\n    in that endpoint namespace, but now rolebinding will be point to\n    the service account in the original namespace, which is $.Release.Namespace.\n    This was tested with elasticsearch chart where\n    s3.clients.local-rgw-elasticsearch.settings.endpoint was pointing to\n    another namespace. With whose changes job-s3-user was able to detect\n    availability of that enpoint. Before changes init container had this error in the logs:\n    Resolving dependency Service rook-ceph-rgw-default in namespace ceph failed: endpoints \"rook-ceph-rgw-default\" is forbidden: User\n    \"system:serviceaccount:osh-infra:elasticsearch-s3-user\" cannot get resource \"endpoints\" in API group \"\" in the namespace \"ceph\"\n...\n"
  },
  {
    "path": "releasenotes/notes/helm-toolkit-fa49be61648b2d72.yaml",
    "content": "---\nhelm-toolkit:\n  - |\n    Mount volumes requested into the job's pod.\n...\n"
  },
  {
    "path": "releasenotes/notes/helm-toolkit.yaml",
    "content": "---\nhelm-toolkit:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Add extra DNS names to Ingress\n  - 0.1.2 Make database backups work with openstack Train\n  - 0.1.3 Fix ks-user script case matching for domain\n  - 0.1.4 Update ingress tpl in helmtoolkit\n  - 0.1.5 Add capability to delete a backup archive\n  - 0.2.0 Update default Kubernetes API for use with Helm v3\n  - 0.2.1 Change Issuer to ClusterIssuer\n  - 0.2.2 Revert Change Issuer to ClusterIssuer\n  - 0.2.3 Allow openstack service list to retry in event of keystone connection issues\n  - 0.2.4 Added detailed FiXME for ks-service script bug and code changes\n  - 0.2.5 Added logic to support cert-manager versioning\n  - 0.2.6 Add metadata in job templates\n  - 0.2.7 Replace brace expansion with more standardized Posix approach\n  - 0.2.8 Override the expiry of Ingress TLS certificate\n  - 0.2.9 Jobs; put labels only in the template spec\n  - 0.2.10 Add more S3 configuration options\n  - 0.2.11 Revert S3 User & Bucket job scripts to v0.2.9\n  - 0.2.12 Remove hook-delete-policy\n  - 0.2.13 Modify connection args for s3 bucket creation when TLS is enabled\n  - 0.2.14 Remove TLS_OPTION argument from s3 bucket creation job\n  - 0.2.15 Adding TLS rabbitmq logic\n  - 0.2.16 Add manual mode to the created backup file name\n  - 0.2.17 Update db backup/restore retry for sending to remote\n  - 0.2.18 Make Rabbit-init job more robust\n  - 0.2.19 Revoke all privileges for PUBLIC role in postgres dbs\n  - 0.2.20 Modify the template of rbac_role to make secrets accessible\n  - 0.2.21 Fix issue with db backup error return code being eaten\n  - 0.2.22 Add ability to set labels to add to resources\n  - 0.2.23 Helm 3 - Fix Job labels\n  - 0.2.24 Migrate Ingress resources to networking.k8s.io/v1\n  - 0.2.25 Set Security Context to ks-user job\n  - 0.2.26 Revert Set Security Context to ks-user job\n  - 0.2.27 Correct private key size input for Certificates and remove minor version support\n  - 0.2.28 Set Security context 
to ks-user job at pod and container level\n  - 0.2.29 Enhance mariadb backup\n  - 0.2.30 Add ability to image pull secrets on pods\n  - 0.2.31 Add log strings for alert generation\n  - 0.2.32 Consolidate mon_endpoints discovery\n  - 0.2.33 Remove set -x\n  - 0.2.34 Modify database backup logic to maintain minimum number of backups\n  - 0.2.35 Database B/R improvements\n  - 0.2.36 Enable taint toleration for Openstack services jobs\n  - 0.2.37 Updated chart naming for subchart compatibility\n  - 0.2.38 Minor change to display archive directory with files in sub-directory\n  - 0.2.39 Removed tillerVersion from Chart to pass helm3 linting\n  - 0.2.40 Revert chart naming for subchart compatibility\n  - 0.2.41 Database B/R - archive name parser added\n  - 0.2.42 Database B/R - fix to make script compliant with a retention policy\n  - 0.2.43 Support having a single external ingress controller\n  - 0.2.44 Added OCI registry authentication\n  - 0.2.45 Modify use_external_ingress_controller place in openstack-helm values.yaml\n  - 0.2.46 Fixed for getting kibana ingress value parameters\n  - 0.2.47 Adjusting of kibana ingress value parameters\n  - 0.2.48 Added verify_databases_backup_archives function call to backup process and added remote backup sha256 hash verification\n  - 0.2.49 Moved RabbitMQ Guest Admin removal to init\n  - 0.2.50 Allow tls for external ingress without specifying key and crt\n  - 0.2.51 Added a random delay up to 300 seconds to remote backup upload/download for load spreading purpose\n  - 0.2.52 Decreased random delay to up to 30 seconds and switched remote backup verification protocol to md5\n  - 0.2.53 Update create db user queries\n  - 0.2.54 Fix dependency resolver to ignore non-existing dependencyKey when dependencyMixinParam is a slice\n  - 0.2.55 Updated deprecated IngressClass annotation\n  - 0.2.56 Expose S3 credentials from Rook bucket CRD secret\n  - 0.2.57 Safer file removal\n  - 0.2.58 Backups verification improvements\n  - 0.2.59 Added 
throttling remote backups\n  - 0.2.60 Change default ingress pathType to Prefix\n  - 0.2.61 Add custom pod annotations snippet\n  - 0.2.62 Add custom secret annotations snippet\n  - 0.2.63 Add custom job annotations snippet and wire it into job templates\n  - 0.2.64 Use custom secret annotations snippet in other secret templates\n  - 0.2.65 Escape special characters in password for DB connection\n  - 0.2.66 Align db scripts with sqlalchemy 2.0\n  - 0.2.67 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.2.68 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.2.69 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.2.70 Decode url-encoded password for rabbit connection\n  - 0.2.71 Add snippet with service parameters\n  - 0.2.72 Add snippet configmap_oslo_policy\n  - 0.2.73 Add ability to get multiple hosts endpoint\n  - 0.2.74 Remove trailing slash in endpoinds\n  - 0.2.75 Add daemonset_overrides_root util\n  - 0.2.76 update tookit to support fqdn alias\n  - 0.2.77 Add recommended kubernetes name label to pods definition\n  - 0.2.78 Fix db-init and db-drop scripts to make them work with sqlalchemy >2.0\n  - 0.2.79 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/horizon-172a4ff3264fc495.yaml",
    "content": "---\nfixes:\n  - |\n    Fix missing job annotation support from values.yaml on the db-sync job.\n...\n"
  },
  {
    "path": "releasenotes/notes/horizon.yaml",
    "content": "---\nhorizon:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 To avoid wrong version check for mysqlclient\n  - 0.1.3 Modify Password validator related settings in Horizon\n  - 0.1.4 Change Issuer to ClusterIssuer\n  - 0.1.5 Revert - Change Issuer to ClusterIssuer\n  - 0.1.6 Change Issuer to ClusterIssuer\n  - 0.1.7 Update glance default policy values\n  - 0.1.8 Implement \"CSRF_COOKIE_HTTPONLY\" option support in horizon\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Make python script PEP8 compliant\n  - 0.2.2 Use policies in yaml format\n  - 0.2.3 Add openstack_enable_password_retrieve variable in value\n  - 0.2.4 Fix OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT value\n  - 0.2.5 Add Ussuri release support\n  - 0.2.6 Add Victoria and Wallaby releases support\n  - 0.2.7 Fix OPENSTACK_ENABLE_PASSWORD_RETRIEVE value\n  - 0.2.8 Add default polices\n  - 0.2.9 Removed default policy in chart in favor of default policy in code\n  - 0.2.10 Helm 3 - Fix Job Labels\n  - 0.2.11 Update htk requirements repo\n  - 0.2.12 Support both json and yaml RBAC Policy Format\n  - 0.2.13 Add container infra api version in values\n  - 0.2.14 Add OPENSTACK_ENDPOINT_TYPE value\n  - 0.2.15 Add local_settings.d\n  - 0.2.16 Fix container-infra value\n  - 0.2.17 Add custom logo\n  - 0.2.18 Enable taint toleration for Openstack services\n  - 0.2.19 Remove unsupported value overrides\n  - 0.2.20 Add SHOW_OPENRC_FILE value\n  - 0.2.21 Add helm hook annotations in db-sync and db-init jobs\n  - 0.2.22 Migrated PodDisruptionBudget resource to policy/v1 API version\n  - 0.2.23 Add Xena and Yoga value overrides\n  - 0.2.24 Remove blank lines in logo configmap\n  - 0.2.25 Added OCI registry authentication\n  - 0.2.26 Support SSL identity endpoint\n  - 0.3.0 Remove support for Train and Ussuri\n  - 0.3.1 Fix container infra api version in values\n  - 0.3.2 Update mysql client version to 1.4.0\n  - 0.3.3 Update mysql client 
version in django.wsgi also\n  - 0.3.4 Add readiness probe timeout\n  - 0.3.5 Replace node-role.kubernetes.io/master with control-plane\n  - 0.3.6 Fix container infra api version parsing\n  - 0.3.7 Update the script to add extra panels\n  - 0.3.8 Fix horizon tolerations\n  - 0.3.9 Add Zed overrides\n  - 0.3.10 Add 2023.1 overrides\n  - 0.3.11 Rollout when logo configmap is changed\n  - 0.3.12 Add Ubuntu Jammy overrides\n  - 0.3.13 Make selenium v4 syntax optional\n  - 0.3.14 Add 2023.2 Ubuntu Jammy overrides\n  - 0.3.15 Update osh-selenium image used by default\n  - 0.3.16 Add support for custom panels\n  - 0.3.17 Set ingress annotation proxy-body-size=300m by default\n  - 0.3.18 Enable custom annotations for Openstack pods\n  - 0.3.19 Add 2024.1 overrides\n  - 0.3.20 Enable custom annotations for Openstack secrets\n  - 0.3.21 Update images used by default\n  - 0.3.22 Align with 2024.1 requirements\n  - 0.3.23 Use global wsgi subinterpreter\n  - 0.3.24 Use base64 values for custom logo\n  - 0.3.25 Implement \"CSRF_TRUSTED_ORIGINS\" option support in horizon\n  - 0.3.26 Fix templating of CSRF_TRUSTED_ORIGINS\n  - 0.3.27 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.3.28 Add WEBSSO_KEYSTONE_URL value\n  - 0.3.29 Add 2024.2 Ubuntu Jammy overrides\n  - 0.3.30 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/increase-default-logging-31db0e9d3e51b429.yaml",
    "content": "---\nother:\n  - |\n    The logging for barbican, cinder, congress, glance, heat, ironic,\n    keystone, magnum, mistral, neutron, nova, and senlin has been increased to\n    log all warnings (and above) to stdout by default.\n...\n"
  },
  {
    "path": "releasenotes/notes/ingress.yaml",
    "content": "---\ningress:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Update to container image repo k8s.gcr.io\n  - 0.2.0 Update default Kubernetes API for use with Helm v3\n  - 0.2.1 Use HostToContainer mountPropagation\n  - 0.2.2 Use full image ref for docker official images\n  - 0.2.3 Uplift ingress to 0.42.0\n  - 0.2.4 Update htk requirements\n  - 0.2.5 Migrate Ingress resources to networking.k8s.io/v1\n  - 0.2.6 Add option to assign VIP as externalIP\n  - 0.2.7 Enable taint toleration for Openstack services jobs\n  - 0.2.8 Uplift ingress to 1.1.3\n  - 0.2.9 Added OCI registry authentication\n  - 0.2.10 Update neutron images to xena release\n  - 0.2.11 Fix resource name in the role\n  - 0.2.12 Uplift ingress to 1.5.1\n  - 0.2.13 Allow setting node_port for the svc\n  - 0.2.14 Replace node-role.kubernetes.io/master with control-plane\n  - 0.2.15 Update kubernetes registry to registry.k8s.io\n  - 0.2.16 Updated deprecated IngressClass annotation\n  - 0.2.17 Fixed controller parameters\n  - 0.2.18 Fixed some additional controller issues\n  - 0.2.19 Uplift ingress controller image to 1.8.2\n...\n"
  },
  {
    "path": "releasenotes/notes/ironic-0035b6286b1c6333.yaml",
    "content": "---\nironic:\n  - |\n    Switched the Ironic API server from the built-in ironic-api command to\n    uWSGI to improve concurrency handling and production readiness. A follow-up fix\n    replaced the wsgi-file directive with the module directive because Ironic\n    does not ship a wsgi_scripts entry point, which prevented the application\n    from loading correctly under uWSGI.\n...\n"
  },
  {
    "path": "releasenotes/notes/ironic-022571f573f6c430.yaml",
    "content": "---\nironic:\n  - |\n    Drop additional access that Ironic conductor no longer needs with the\n    removal of the iSCSI deploy interface. This change went into effect\n    with 2023.2. Remove host mount for /dev, /sys, and /var/run.\n    Disable hostIPC by default.\n...\n"
  },
  {
    "path": "releasenotes/notes/ironic-2b9283c8924f8c63.yaml",
    "content": "---\nironic:\n  - |\n    Set default config value for \"database.max_retries\" to \"-1\".\n...\n"
  },
  {
    "path": "releasenotes/notes/ironic-2fcd7c5ae98b55f4.yaml",
    "content": "---\nironic:\n  - |\n    Drop the retrive-cleaning-network init container as it was only setting a\n    deprecated option and the script it ran gave less feedback to the user than\n    the built in check inside of Ironic. With the future of Ironic standalone\n    networking this option will make even less sense so allow the default\n    behavior of looking up the name in Ironic.\n...\n"
  },
  {
    "path": "releasenotes/notes/ironic-4963b8bfe3c212d0.yaml",
    "content": "---\nironic:\n  - |\n    Add default TLS secret names like the other charts have.\n...\n"
  },
  {
    "path": "releasenotes/notes/ironic-4a1d33f9e4147b79.yaml",
    "content": "---\nironic:\n  - |\n    Add missing ironic authentication config for `[nova]` and `[cinder]`.\n...\n"
  },
  {
    "path": "releasenotes/notes/ironic-82bd78c64b57d2ce.yaml",
    "content": "---\nironic:\n  - |\n    Remove outdated default kolla images and use quay.io/airshipit/ironic:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "releasenotes/notes/ironic-adbba9c6718cc0d6.yaml",
    "content": "---\nironic:\n  - |\n    Add `ironic-dbsync online_data_migrations` to the ironic dbsync template\n    per the ironic upgrade guide:\n    https://docs.openstack.org/ironic/latest/admin/upgrade-guide.html\n...\n"
  },
  {
    "path": "releasenotes/notes/ironic-c0de8abe9970dca0.yaml",
    "content": "---\nironic:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/ironic/ironic.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/ironic.yaml",
    "content": "---\nironic:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Added post-install and post-upgrade helm.sh/hook for jobs\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Use policies in yaml format\n  - 0.2.2 Update htk requirements repo\n  - 0.2.3 Enable taint toleration for Openstack services\n  - 0.2.4 Update defaults to W release\n  - 0.2.5 Migrated PodDisruptionBudget resource to policy/v1 API version\n  - 0.2.6 Added OCI registry authentication\n  - 0.2.7 Use HTTP probe instead of TCP probe\n  - 0.2.8 Add helm3 hook supports to allow things like terraform deploys\n  - 0.2.9 Replace node-role.kubernetes.io/master with control-plane\n  - 0.2.10 Add standalone overrides\n  - 0.2.11 Enable custom annotations for Openstack pods\n  - 0.2.12 allow custom annotations on jobs\n  - 0.2.13 Enable custom annotations for Openstack secrets\n  - 0.2.14 Update images used by default\n  - 0.2.15 Allow enabling/disabling of conductor http and pxe containers and overriding their init and runtime scripts\n  - 0.2.16 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.2.17 Allow overriding of hostNetwork and hostIPC for Ironic conductor\n  - 0.2.18 Use service tokens\n  - 0.2.19 Allow extra containers for the conductor\n  - 0.2.20 ensure tempdir is set to a reasonable default\n  - 0.2.21 fix path to ironic.conf for 0.2.20's tempdir setting\n  - 0.2.22 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/keystone-0e6674e1c443cd81.yaml",
    "content": "---\nkeystone:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/keystone/keystone.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/keystone-12efe8927d1a0934.yaml",
    "content": "---\nkeytone:\n  - |\n    Use Keytone API wsgi module directly instead of wsgi script which\n    has been removed.\n...\n"
  },
  {
    "path": "releasenotes/notes/keystone-17cdfeb53f6eb5dd.yaml",
    "content": "---\nkeystone:\n  - |\n    Remove endpoint-update.py from db-sync Job.\n    The script was originally introduced in OSH to work around a\n    limitation where `keystone-manage bootstrap` would not update endpoints\n    once created. That limitation was resolved when upstream keystone added\n    endpoint updates support to bootstrap.\n...\n"
  },
  {
    "path": "releasenotes/notes/keystone-1aaec51f0512e445.yaml",
    "content": "---\nkeystone:\n  - |\n    Provide a WSGI script for Apache to use to start up Keystone since Keystone\n    stopped shipping their own entrypoint. This is done in a way that users can\n    override it and the container has less moving pieces at startup.\n...\n"
  },
  {
    "path": "releasenotes/notes/keystone-56908951efdcc19e.yaml",
    "content": "---\nkeystone:\n  - |\n    Annotate credential and fernet keys secrets with the Helm keep policy.\n    While helm does not clean up hook resources today, their documentation\n    says that it is coming and users should annotate resources they do not\n    expect to be deleted appropriately. Some GitOps tools like ArgoCD\n    implement the cleanup today as part of their Helm support.\n...\n"
  },
  {
    "path": "releasenotes/notes/keystone-5dd1eca70f3382d8.yaml",
    "content": "---\nkeystone:\n  - |\n    Ensure all errors go to the kubenertes logs and not to a file in the\n    container.\n...\n"
  },
  {
    "path": "releasenotes/notes/keystone-9bca09a40cc3dc68.yaml",
    "content": "---\nfixes:\n  - Fix the number of max active fernet keys\n...\n"
  },
  {
    "path": "releasenotes/notes/keystone-dab27a4eeaab96d1.yaml",
    "content": "---\nkeystone:\n  - |\n    Use Keystone native wsgi script keystone/wsgi/api.py\n    instead of the wsgi wrapper script provided by the\n    keystone chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/keystone-e2d6c0f6c85415ab.yaml",
    "content": "---\nkeystone:\n  - |\n    Adds optional settings backoffLimit and activeDeadlineSeconds\n    to the keystone bootstrap job.\n...\n"
  },
  {
    "path": "releasenotes/notes/keystone-fb00add9c87916a3.yaml",
    "content": "---\nkeystone:\n  - |\n    Add support for etcSources to db-sync job.\n...\n"
  },
  {
    "path": "releasenotes/notes/keystone-healthcheck-1f72d266f886e735.yaml",
    "content": "---\nkeystone:\n  - Use oslo.middleware healthcheck endpoint for liveness and readiness\n...\n"
  },
  {
    "path": "releasenotes/notes/keystone.yaml",
    "content": "---\nkeystone:\n  - 0.1.0 Initial Chart\n  - 0.1.1 UPDATE\n  - 0.1.2 UPDATE\n  - 0.1.3 UPDATE\n  - 0.1.4 UPDATE\n  - 0.1.5 Revert clusterissuer change\n  - 0.1.6 Fix typo in subPath entry\n  - 0.1.7 Move rabbit-init to dynamic dependency\n  - 0.1.8 Change Issuer to ClusterIssuer\n  - 0.1.9 Add helm.sh/hook related annotations\n  - 0.1.10 Update RBAC apiVersion from /v1beta1 to /v1\n  - 0.1.11 Remove congress residue\n  - 0.1.12 Add helm hook conditional\n  - 0.1.13 Fix Error - wrong number of args for set\n  - 0.1.14 Remove setup helm hooks\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Remove paste ini config settings\n  - 0.2.2 Make python script PEP8 compliant\n  - 0.2.3 Adding rabbitmq TLS logic\n  - 0.2.4 Use policies in yaml format\n  - 0.2.5 Mount rabbitmq TLS secret\n  - 0.2.6 Modify default probe timings\n  - 0.2.7 Add Ussuri release support\n  - 0.2.8 Remove member bootstrap logic\n  - 0.2.9 Add Victoria and Wallaby releases support\n  - 0.2.10 Make internal TLS more robust\n  - 0.2.11 Add missing slash\n  - 0.2.12 Helm 3 - Fix Job Labels\n  - 0.2.13 Helm 3 - Fix more Job Labels\n  - 0.2.14 Update htk requirements repo\n  - 0.2.15 Reduce log chattiness\n  - 0.2.16 Remove extra fsGroup\n  - 0.2.17 Update default image references\n  - 0.2.18 Remove default policy\n  - 0.2.19 Revert Reduce log chattiness\n  - 0.2.20 Enable taint toleration for Openstack services\n  - 0.2.21 Updated naming for subchart compatibility\n  - 0.2.22 Remove older values overrides\n  - 0.2.23 Remove usage of six\n  - 0.2.24 Remove unused admin port in keystone\n  - 0.2.25 Migrated CronJob resource to batch/v1 API version & PodDisruptionBudget to policy/v1\n  - 0.2.26 Add Xena and Yoga values overrides\n  - 0.2.27 Use LOG.warning instead of deprecated LOG.warn\n  - 0.2.28 Added OCI registry authentication\n  - 0.2.29 Support TLS endpoints\n  - 0.2.30 Distinguish between port number of internal endpoint and binding port number\n  - 0.3.0 Remove support for 
Train and Ussuri\n  - 0.3.1 Replace node-role.kubernetes.io/master with control-plane\n  - 0.3.2 Add Zed overrides\n  - 0.3.3 Add 2023.1 overrides\n  - 0.3.4 Add Ubuntu Jammy overrides\n  - 0.3.5 Add 2023.2 Ubuntu Jammy overrides\n  - 0.3.6 Use region option in keystone endpoint-update.py\n  - 0.3.7 Make keystone TLS configuration granular\n  - 0.3.8 Enable custom annotations for Openstack pods\n  - 0.3.9 Add 2024.1 overrides\n  - 0.3.10 Allow custom annotations on jobs\n  - 0.3.11 Fix custom annotations when helm3_hook is disabled\n  - 0.3.12 Enable custom annotations for Openstack secrets\n  - 0.3.13 Update images used by default\n  - 0.3.14 Align db scripts with sqlalchemy 2.0\n  - 0.3.15 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.3.16 Align db scripts with Sqlalchemy 2\n  - 0.3.17 Add 2024.2 Ubuntu Jammy overrides\n  - 0.3.18 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/kibana-053401293f7f508d.yaml",
    "content": "---\nkibana:\n  - Upgrade to v8.19.9, in sync with Elasticsearch\n...\n"
  },
  {
    "path": "releasenotes/notes/kibana-add46185e9a8d6af.yaml",
    "content": "---\nfixes:\n  - |\n    Fix retry logic to index creation script.\n    Prevent creation of duplicate indexes.\n...\n"
  },
  {
    "path": "releasenotes/notes/kibana-c0b39f760a7c5b80.yaml",
    "content": "---\nkibana:\n  - Upgrade to the latest v8.18.1, in sync with Elasticsearch\n...\n"
  },
  {
    "path": "releasenotes/notes/kibana.yaml",
    "content": "---\nkibana:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Drop usage of fsGroup inside container\n  - 0.1.3 Enable TLS with Elasticsearch\n  - 0.1.4 Enable TLS for Kibana ingress path\n  - 0.1.5 Use full image ref for docker official images\n  - 0.1.6 Remove Kibana indices before pod start up\n  - 0.1.7 Helm 3 - Fix Job labels\n  - 0.1.8 Update htk requirements\n  - 0.1.9 Revert removing Kibana indices before pod start up\n  - 0.1.10 Update image defaults\n  - 0.1.11 Added OCI registry authentication\n  - 0.1.12 Added feedback http_code 200 for kibana indexes\n  - 0.1.13 Update Kibana to 8.9.0\n  - 0.1.14 Add 2023.1 Ubuntu Focal overrides\n  - 0.1.15 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.16 Add 2024.1 Ubuntu Jammy overrides\n  - 0.1.17 Update script to use data views replacing deprecated api\n  - 0.1.18 Add retry logic to create_kibana_index_patterns.sh\n  - 0.1.19 Add 2024.2 overrides\n  - 0.1.20 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/kube-dns.yaml",
    "content": "---\nkube-dns:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Update to container image repo k8s.gcr.io\n  - 0.1.3 Use full image ref for docker official images\n  - 0.1.4 Update htk requirements\n  - 0.1.5 Added OCI registry authentication\n  - 0.1.6 Replace node-role.kubernetes.io/master with control-plane\n  - 0.1.7 Update kubernetes registry to registry.k8s.io\n  - 0.1.8 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.9 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/kubernetes-keystone-webhook.yaml",
    "content": "---\nkubernetes-keystone-webhook:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Update k8s-keystone-auth version\n  - 0.1.2 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.3 Remove Kibana source reference\n  - 0.1.4 Use full image ref for docker official images\n  - 0.1.5 Update htk requirements\n  - 0.1.6 Update default image value to Wallaby\n  - 0.1.7 Added OCI registry authentication\n  - 0.1.8 Add 2023.1 Ubuntu Focal overrides\n  - 0.1.9 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.10 Add 2024.1 Ubuntu Jammy overrides\n  - 0.1.11 Add 2024.2 overrides\n  - 0.1.12 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/kubernetes-node-problem-detector.yaml",
    "content": "---\nkubernetes-node-problem-detector:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Unpin images built with osh-images\n  - 0.1.3 Update RBAC apiVersion from /v1beta1 to /v1\n  - 0.1.4 Update the systemd-monitor lookback duration\n  - 0.1.5 Use full image ref for docker official images\n  - 0.1.6 Update htk requirements\n  - 0.1.7 Added OCI registry authentication\n  - 0.1.8 Replace node-role.kubernetes.io/master with control-plane\n  - 0.1.9 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.10 Update node_problem_detector to latest-ubuntu_jammy\n  - 0.1.11 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/ldap-4737a2ba0a8a499f.yaml",
    "content": "---\nldap:\n  - |\n    Update openldap image to symas/openldap:2.6.8-debian-12\n...\n"
  },
  {
    "path": "releasenotes/notes/ldap.yaml",
    "content": "---\nldap:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Use full image ref for docker official images\n  - 0.1.3 Update htk requirements\n  - 0.1.4 Added OCI registry authentication\n  - 0.1.5 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.6 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/libvirt-5bf3185fc00a9938.yaml",
    "content": "---\nfeatures:\n  - |\n    Added ability to configure libvirt hooks.\n...\n"
  },
  {
    "path": "releasenotes/notes/libvirt-85375c3ae369bc39.yaml",
    "content": "---\nlibvirt:\n  - |\n    Add .Values.libvirt.extraContainers hook to make it possible to\n    add additional containers to libvirt daemonset, e.g. for monitoring\n    purposes. Also move exporter container to values_overrides.\n...\n"
  },
  {
    "path": "releasenotes/notes/libvirt-ac59444a6623ddb9.yaml",
    "content": "---\nlibvirt:\n  - |\n    Update Ceph to Tentacle 20.2.1\n...\n"
  },
  {
    "path": "releasenotes/notes/libvirt-b5dc605552feb278.yaml",
    "content": "---\nlibvirt:\n  - |\n    Update Ceph to Tentacle 20.2.0 and replaced image sources from docker.io/openstackhelm with quay.io/airshipit\n...\n"
  },
  {
    "path": "releasenotes/notes/libvirt-e8ba1d91a8ca4999.yaml",
    "content": "---\nfixes:\n  - |\n    Added readiness and liveness probes for the libvirt-exporter sidecar container.\n...\n"
  },
  {
    "path": "releasenotes/notes/libvirt-f81d6fc0b0094209.yaml",
    "content": "---\nlibvirt:\n  - |\n    Keeping vnc ca/key/cert in /tmp so in case container (not pod) restarts\n    it could copy them one again to the proper place. This allows to handle\n    libvirt crashes properly without restarting libvirt pods.\n...\n"
  },
  {
    "path": "releasenotes/notes/libvirt.yaml",
    "content": "---\nlibvirt:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Setup libvirt SSL\n  - 0.1.3 Create override for external ceph cinder backend\n  - 0.1.4 Set unix socket auth method as none\n  - 0.1.5 Use full image ref for docker official images\n  - 0.1.6 Enhancement to enable probes override from values.yaml\n  - 0.1.7 Add libvirt overrides for Victoria and Wallaby\n  - 0.1.8 Update htk requirements\n  - 0.1.9 Exec libvirt instead of forking from bash\n  - 0.1.10 Enable taint toleration for Openstack services jobs\n  - 0.1.11 Remove unused overrides and update default image\n  - 0.1.12 Add libvirt exporter as a sidecar\n  - 0.1.13 Added OCI registry authentication\n  - 0.1.14 Remove use of exec in libvirt.sh\n  - 0.1.15 Add support for libvirt to connect to external ceph without any local ceph present\n  - 0.1.16 Update all Ceph images to Focal\n  - 0.1.17 Add ovn.yaml values_override, remove dependency from neutron-ovs-agent module\n  - 0.1.18 Replace node-role.kubernetes.io/master with control-plane\n  - 0.1.19 Set kubernetes cgroup value equal kubepods.slice to fit systemd cgroup driver\n  - 0.1.20 Update Ceph to 17.2.6\n  - 0.1.21 Disable libvirt cgroup functionality for cgroup-v2\n  - 0.1.22 Set targeted dependency of libvirt with ovn networking backend\n  - 0.1.23 Add support for enabling vencrypt\n  - 0.1.24 Include HOSTNAME_FQDN for certificates\n  - 0.1.25 Add 2023.2 Ubuntu Jammy overrides\n  - 0.1.26 Update Rook to 1.12.5 and Ceph to 18.2.0\n  - 0.1.27 Add watch verb to vencrypt cert-manager Role\n  - 0.1.28 Update Ceph images to Jammy and Reef 18.2.1\n  - 0.1.29 Update Ceph images to patched 18.2.2 and restore debian-reef repo\n  - 0.1.30 Add 2024.1 overrides\n  - 0.1.31 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.32 Enable a flag to parse Libvirt Nova metadata in libvirt exporter\n  - 0.1.33 Handle cgroupv2 correctly\n  - 0.1.34 Remove hugepages 
creation test\n  - 0.1.35 Allow to initialize virtualization modules\n  - 0.1.36 Allow to generate dynamic config options\n  - 0.1.37 Make readiness probes more tiny\n  - 0.1.38 Implement daemonset overrides for libvirt\n  - 0.1.39 Add 2023.1 overrides for Ubuntu Focal and Jammy\n  - 0.1.40 Add 2024.2 overrides\n  - 0.1.41 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/local-storage.yaml",
    "content": "---\nlocal-storage:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Update htk requirements\n  - 0.1.3 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/local-volume-provisioner.yaml",
    "content": "---\nlocal-volume-provisioner:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/magnum.yaml",
    "content": "---\nmagnum:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Added post-install and post-upgrade helm hook for jobs\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Use policies in yaml format\n  - 0.2.2 Fix restarting of magnum-conductor pods\n  - 0.2.3 Update htk requirements repo\n  - 0.2.4 Mount empty temp_cache_dir for performance\n  - 0.2.5 Update default image values to wallaby\n  - 0.2.6 Migrated PodDisruptionBudget resource to policy/v1 API version\n  - 0.2.7 Added OCI registry authentication\n  - 0.2.8 Remove default policy rules\n  - 0.2.9 Define service_type in keystone_authtoken to support application credentials with access rules\n  - 0.2.10 Uses uWSGI for API service\n  - 0.2.11 Enable custom annotations for Openstack pods\n  - 0.2.12 Enable custom annotations for Openstack secrets\n  - 0.2.13 Update images used by default\n  - 0.2.14 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.2.15 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/manila-23590e37667d10a5.yaml",
    "content": "---\nmanila:\n  - |\n    This change enhances the flexibility of the Manila Chart by introducing\n    configurability for the CephFS backend. Previously, CephFS-related configuration values\n    were statically defined within the manifests, limiting deployment customization.\n    With this update, CephFS backend parameters can now be overridden via values.\n...\n"
  },
  {
    "path": "releasenotes/notes/manila-3a767553950629bd.yaml",
    "content": "---\nmanila:\n  - |\n    Add support for the Ceph File System (CephFS) backend in the manila.\n...\n"
  },
  {
    "path": "releasenotes/notes/manila-7bf5ad7472dbf691.yaml",
    "content": "---\nmanila:\n  - Use more standard DB config setting\n...\n"
  },
  {
    "path": "releasenotes/notes/manila-a5beeacdb577dd23.yaml",
    "content": "---\nmanila:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/manila/manila.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/manila-f7286f302a9372eb.yaml",
    "content": "---\nmanila:\n  - |\n    Fix Rally test pod template\n...\n"
  },
  {
    "path": "releasenotes/notes/manila-f8ada2e675fcc308.yaml",
    "content": "---\nmanila:\n  - |\n    Use Manila API module for UWSGI configuration because WSGI script\n    manila-wsgi has been removed.\n...\n"
  },
  {
    "path": "releasenotes/notes/manila.yaml",
    "content": "---\nmanila:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Define service_type in keystone_authtoken to support application credentials with access rules\n  - 0.1.2 Add Zed overrides\n  - 0.1.3 Add 2023.1 overrides\n  - 0.1.4 Add Ubuntu Jammy overrides\n  - 0.1.5 Update port name of service-api.yaml\n  - 0.1.6 Add 2023.2 Ubuntu Jammy overrides\n  - 0.1.7 Properly config network host for share service\n  - 0.1.8 uses uWSGI for API service\n  - 0.1.9 Enable custom annotations for Openstack pods\n  - 0.1.10 Add 2024.1 overrides\n  - 0.1.11 Enable custom annotations for Openstack secrets\n  - 0.1.12 Update images used by default\n  - 0.1.13 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.14 Add 2024.2 Ubuntu Jammy overrides\n  - 0.1.15 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/mariadb-0cb94bb0ae8cf38a.yaml",
    "content": "---\nfixes:\n  - |\n    Added failover openrc environment variables to database backup cron jobs.\n...\n"
  },
  {
    "path": "releasenotes/notes/mariadb-2d75f250c1fbcd73.yaml",
    "content": "---\nmariadb:\n  - |\n    The reboot.node annotation wasn't removed after the cluster init is done.\n    This happened due to the fact that commit\n    c2269d70a23b55c459233ab5fc28362b7c2ca766 removed necessary code lines for\n    this and is absolutely needed. So, we are putting these changes back.\n...\n"
  },
  {
    "path": "releasenotes/notes/mariadb-7d8282a6eeb4d249.yaml",
    "content": "---\nmariadb:\n  - |\n    This change applies a fix to the StatefulSet template of the MariaDB chart,\n    ensuring that the hash for the exporter secrets is correctly included. This\n    is necessary for the proper functioning of the exporter component in the\n    MariaDB deployment.\n...\n"
  },
  {
    "path": "releasenotes/notes/mariadb-840fccbf8f0e9d39.yaml",
    "content": "---\n# To create a new release note related to a specific chart:\n# reno new <chart_name>\n#\n# To create a new release note for a common change (when multiple charts\n# are changed):\n# reno new common\nmariadb:\n  - |\n    This change disables TLS for the Prometheus MySQL exporter sidecar container in the MariaDB StatefulSet.\nissues:\n  - |\n    mysql-exporter sidecar container has TLS enabled but was missing the CA certificate, which caused the exporter to fail to connect to the database.\nfixes:\n  - |\n    In order to fix this issue, the TLS configuration for the Prometheus MySQL exporter sidecar container has been updated to disable TLS. This allows the exporter to connect to the MariaDB database without requiring a CA certificate.\n...\n"
  },
  {
    "path": "releasenotes/notes/mariadb-b923ac9345734125.yaml",
    "content": "---\nmariadb:\n  - |\n    Updated to use MariaDB 11.4.8 built on Ubuntu 22.04 (Noble).\nfixes:\n  - |\n    Replaced deprecated mysql* binaries to their mariadb* equivalents in\n    scripts. Also fixed TLS connection for myqsl-exporter side conainers.\n...\n"
  },
  {
    "path": "releasenotes/notes/mariadb-backup-58c8a77f9c03bae8.yaml",
    "content": "---\nfixes:\n  - |\n    Added failover openrc environment variables to database backup cron jobs.\n...\n"
  },
  {
    "path": "releasenotes/notes/mariadb-backup-af891fea0cfa3db5.yaml",
    "content": "---\nmariadb:\n  - |\n    - fixed backup_mariadb_sh script to correctly handle verification during the backup process\n...\n"
  },
  {
    "path": "releasenotes/notes/mariadb-backup-c27eb2dc0a56a7ed.yaml",
    "content": "---\nmariadb-backup:\n  - Use quay.io/airshipit/mariadb:latest-ubuntu_noble images by default.\n...\n"
  },
  {
    "path": "releasenotes/notes/mariadb-backup.yaml",
    "content": "---\nmariadb-backup:\n  - 0.0.1 Initial Chart\n  - 0.0.2 Added staggered backups support\n  - 0.0.3 Backups verification improvements\n  - 0.0.4 Added throttling remote backups\n  - 0.0.5 Add 2024.1 overrides\n  - 0.0.6 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.0.7 Add 2024.2 overrides\n  - 0.0.8 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/mariadb-cluster-4672d16769afdb47.yaml",
    "content": "---\nmariadb-cluster:\n  - Use quay.io/airshipit/mariadb:latest-ubuntu_noble images by default.\n...\n"
  },
  {
    "path": "releasenotes/notes/mariadb-cluster.yaml",
    "content": "---\nmariadb-cluster:\n  - 0.0.1 Initial Chart\n  - 0.0.2 Enable auto-upgrade\n  - 0.0.3 Fixed TLS config and added x509 requirement\n  - 0.0.4 Add 2024.1 overrides\n  - 0.0.5 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.0.6 Add 2024.2 overrides\n  - 0.0.7 Allow to use default storage class\n  - 0.0.8 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/mariadb-dcd35d40fcd4a749.yaml",
    "content": "---\nmariadb:\n  - |\n    - fixed backup_mariadb_sh script to correctly handle verification during the backup process\n...\n"
  },
  {
    "path": "releasenotes/notes/mariadb.yaml",
    "content": "---\nmariadb:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 mariadb security best practice fixes\n  - 0.1.3 Fix MariaDB backup script\n  - 0.1.4 Unpin images built with osh-images\n  - 0.1.5 Update to container image repo k8s.gcr.io\n  - 0.1.6 Change Issuer to ClusterIssuer\n  - 0.1.7 Revert - Change Issuer to ClusterIssuer\n  - 0.1.8 Change Issuer to ClusterIssuer with logic in place to support cert-manager versioning\n  - 0.1.9 Uplift Mariadb-ingress to 0.42.0\n  - 0.1.10 Rename mariadb backup identities\n  - 0.1.11 Disable mariadb mysql history client logging\n  - 0.1.12 Set strict permission on mariadb data dir\n  - 0.1.13 Fix race condition for grastate.dat\n  - 0.1.14 Update mysqld-exporter image to v0.12.1\n  - 0.2.0 Uplift mariadb version and ubuntu release\n  - 0.2.1 Prevent potential splitbrain issue if cluster is in reboot state\n  - 0.2.2 remove deprecated svc annotation tolerate-unready-endpoints\n  - 0.2.3 Remove panko residue\n  - 0.2.4 Use full image ref for docker official images\n  - 0.2.5 Added helm hook for post-install and post-upgrade in prometheus exporter job.\n  - 0.2.6 Update log format stream for mariadb\n  - 0.2.7 add ingress resources\n  - 0.2.8 Helm 3 - Fix Job labels\n  - 0.2.9 Update htk requirements\n  - 0.2.10 Fix Python exceptions\n  - 0.2.11 Enhance mariadb backup\n  - 0.2.12 Remove set -x\n  - 0.2.13 Adjust readiness.sh in single node and no replication case\n  - 0.2.14 Fix comparison value\n  - 0.2.15 Updated naming for subchart compatibility\n  - 0.2.16 Revert naming for subchart compatibility\n  - 0.2.17 Enable taint toleration for Openstack services jobs\n  - 0.2.18 Updated naming for subchart compatibility\n  - 0.2.19 Update default image value to Wallaby\n  - 0.2.20 Migrated CronJob resource to batch/v1 API version & PodDisruptionBudget to policy/v1; Uplift Mariadb-ingress to 1.1.3\n  - 0.2.21 Fix mysql exporter user privileges\n  - 0.2.22 Fix ingress 
cluster role privileges\n  - 0.2.23 Fix backup script by ignoring sys database for MariaDB 10.6 compatibility\n  - 0.2.24 Uplift Mariadb-ingress to 1.2.0\n  - 0.2.25 Add liveness probe to restart a pod that got stuck in a transfer wsrep_local_state_comment\n  - 0.2.26 Added OCI registry authentication\n  - 0.2.27 Fix broken helmrelease for helmv3\n  - 0.2.28 Added verify_databases_backup_in_directory function implementation\n  - 0.2.29 Uplift Mariadb-ingress to 1.5.1\n  - 0.2.30 Replace node-role.kubernetes.io/master with control-plane\n  - 0.2.31 Update kubernetes registry to registry.k8s.io\n  - 0.2.32 Prevent liveness probe from killing pods during SST\n  - 0.2.33 Add 2023.1 Ubuntu Focal overrides\n  - 0.2.34 Uplift ingress controller image to 1.8.2\n  - 0.2.35 Update apparmor override\n  - 0.2.36 Added staggered backups support\n  - 0.2.37 Backups verification improvements\n  - 0.2.38 Added throttling remote backups\n  - 0.2.39 Template changes for image 1.9 compatibility\n  - 0.2.40 Start.py allows to create mariadb-service-primary service and endpoint\n  - 0.2.41 Switch to primary service instead of ingress by default\n  - 0.2.42 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.2.43 Add 2024.1 Ubuntu Jammy overrides\n  - 0.2.44 Uplift ingress controller image to 1.11.2\n  - 0.2.45 Add mariadb controller support\n  - 0.2.46 Avoid using cluster endpoints\n  - 0.2.47 Deploy exporter as sidecar\n  - 0.2.48 Switch to mariadb controller deployment\n  - 0.2.49 Remove ingress deployment\n  - 0.2.50 Add cluster-wait job\n  - 0.2.51 Add 2024.2 overrides\n  - 0.2.52 Added SSL support to cluster-wait job\n  - 0.2.53 Use constant for mysql binary name\n  - 0.2.54 Improve leader election on cold start\n  - 0.2.55 Improve python3 compatibility\n  - 0.2.56 Stop running threads on sigkill\n  - 0.2.57 Remove useless retries on conflicts during cm update\n  - 0.2.58 Prevent TypeError in get_active_endpoint function\n  - 0.2.59 Give more time on 
resolving configmap update conflicts\n  - 0.2.60 Refactor liveness/readiness probes\n  - 0.2.61 Avoid using deprecated isAlive()\n  - 0.2.62 Implement mariadb upgrade during start\n  - 0.2.63 Use service ip for endpoint discovery\n  - 0.2.64 Add terminationGracePeriodSeconds\n  - 0.2.65 Allow to use default storage class\n  - 0.2.66 Add probes for exporter\n  - 0.2.67 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/masakari-ea8acf2427bc9811.yaml",
    "content": "---\nmasakari:\n  - |\n    Add instance introspection to the Masakari chart\nfeatures:\n  - |\n    Added Instance Introspection monitor the Openstack Helm Masakari project to provide\n    vm HA by automatically detecting the system-level failure events via QEMU Guest Agent.\n    If it detects VM heartbeat failure events, it sends notifications to the masakari-api.\n...\n"
  },
  {
    "path": "releasenotes/notes/masakari.yaml",
    "content": "---\nmasakari:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Seperate node labels for monitors\n  - 0.1.2 Added halm hook and fix for hostmonitors to support pacemaker remote\n  - 0.1.3 Mount sudoers file for masakari hostmonitors\n  - 0.1.4 Migrated PodDisruptionBudget resource to policy/v1 API version\n  - 0.1.5 Added OCI registry authentication\n  - 0.1.6 Use HTTP probe instead of TCP probe\n  - 0.1.7 Define service_type in keystone_authtoken to support application credentials with access rules\n  - 0.1.8 Add helm hook to jobs-rabbitmq-init\n  - 0.1.9 Enable custom annotations for Openstack pods\n  - 0.1.10 Enable custom annotations for Openstack secrets\n  - 0.1.11 Move api_paste_config value to wsgi\n  - 0.1.12 Update images used by default\n  - 0.1.13 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.14 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/memcached-1ae10613b2e36813.yaml",
    "content": "---\nmemcached:\n  - |\n    Add .Values.memcached.extraContainers hook to make it possible to\n    add extra containers to memcached statefulset, e.g. for monitoring\n    purposes. Also move the exporter sidecar to values_overrides.\n...\n"
  },
  {
    "path": "releasenotes/notes/memcached.yaml",
    "content": "---\nmemcached:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Make stats cachedump configurable.\n  - 0.1.3 Remove panko residue\n  - 0.1.4 Use full image ref for docker official images\n  - 0.1.5 Update htk requirements\n  - 0.1.6 Switch to using sidecar for exporter\n  - 0.1.7 Updated naming for subchart compatibility\n  - 0.1.8 Enable taint toleration for Openstack services jobs\n  - 0.1.9 Revert naming for subchart compatibility\n  - 0.1.10 Updated naming for subchart compatibility\n  - 0.1.11 Remove gnocchi netpol override\n  - 0.1.12 Added OCI registry authentication\n  - 0.1.13 Replace node-role.kubernetes.io/master with control-plane\n  - 0.1.14 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.15 Allow to pass additional service parameters\n  - 0.1.16 Change deployment type to statefulset\n  - 0.1.17 Fix statefulset spec format\n  - 0.1.18 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/mistral.yaml",
    "content": "---\nmistral:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Added post-install and post-upgrade hook for Jobs\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Use policies in yaml format\n  - 0.2.2 Update htk requirements repo\n  - 0.2.3 Update default imaage values to Wallaby\n  - 0.2.4 Migrated PodDisruptionBudget resource to policy/v1 API version\n  - 0.2.5 Added OCI registry authentication\n  - 0.2.6 Use HTTP probe instead of TCP probe\n  - 0.2.7 Remove default policy rules\n  - 0.2.8 Enable custom annotations for Openstack pods\n  - 0.2.9 Enable custom annotations for Openstack secrets\n  - 0.2.10 Update images used by default\n  - 0.2.11 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.2.12 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/nagios-36a6b2cb6e9fc720.yaml",
    "content": "---\nnagios:\n  - |\n    Use the quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy image\n    by default for init containers\n...\n"
  },
  {
    "path": "releasenotes/notes/nagios.yaml",
    "content": "---\nnagios:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Use full image ref for docker official images\n  - 0.1.3 Mount internal TLS CA certificate\n  - 0.1.4 Update htk requirements\n  - 0.1.5 Switch nagios image from xenial to bionic\n  - 0.1.6 Added OCI registry authentication\n  - 0.1.7 Upgrade osh-selenium image to latest-ubuntu_focal\n  - 0.1.8 Use helm toolkit for readiness probes\n  - 0.1.9 Make using selenium v4 syntax optional\n  - 0.1.10 Correct selenium v3 syntax\n  - 0.1.11 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.12 Update nagios image tag to latest-ubuntu_jammy\n  - 0.1.13 Add the ability to use custom Nagios plugins\n  - 0.1.14 Upgrade osh-selenium image to ubuntu_jammy\n  - 0.1.15 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/namespace-config.yaml",
    "content": "---\nnamespace-config:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Grant access to existing PodSecurityPolicy\n  - 0.1.2 Rmove PodSecurityPolicy\n  - 0.1.3 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-00a56405067b123d.yaml",
    "content": "---\nneutron:\n  - |\n    Fix issue with etcSources where resources fails to apply if the list is empty.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-013c9be46456b92c.yaml",
    "content": "---\nneutron:\n  - |\n    Fix neutron ironic agent fail to start with missing host information.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-288ac8b37720832e.yaml",
    "content": "---\nneutron:\n  - |\n    Use psutil.net_connections to iterate over tcp connections\n    during health check. The psutil.connections is still available\n    but deprecated.\n  - |\n    Do not skip children processes when counting established connections\n    because after eventlet removal plain threads are used.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-2af36e49a0a377c3.yaml",
    "content": "---\n# To create a new release note related to a specific chart:\n# reno new <chart_name>\n#\n# To create a new release note for a common change (when multiple charts\n# are changed):\n# reno new common\nneutron:\n  - |\n    Use `ip add replace` instead of `ip addr add` in the init script to\n    make it idempotent and avoid errors when the script tries to assign\n    the same IP address to the interface on subsequent runs.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-2bb975307f0d27f2.yaml",
    "content": "---\nneutron:\n  - |\n    Added the ability to configure custom OVN Northbound (ovn_nb_connection) and\n    Southbound (ovn_sb_connection) connection strings\n    (e.g., for deployments using Kube-OVN or external OVN databases).\n    By default, the chart continues to use the in-cluster service environment\n    This change provides flexibility to support both default in-cluster OVN and\n    custom/external OVN backends seamlessly.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-2d4db97bc8900286.yaml",
    "content": "---\nneutron:\n  - |\n    Create multiple Keystone service accounts to access to\n    other Openstack APIs\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-315f825e54d3f34c.yaml",
    "content": "---\nneutron:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/neutron/neutron.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart. Ensure that the neutron\n    chart always loads config overrides from /etc/neutron/neutron.conf.d\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-32815761690bedf5.yaml",
    "content": "---\nneutron:\n  - |\n    Add support for etcSources to db-sync job.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-3c11cf48f8c7c592.yaml",
    "content": "---\nneutron:\n  - |\n    add raise_for_status method call to the livenessProbe command to properly\n    raise an error when return code is 4xx (client error) or 5xx (server error)\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-3c33aea435f7ab8a.yaml",
    "content": "---\n# To create a new release note related to a specific chart:\n# reno new <chart_name>\n#\n# To create a new release note for a common change (when multiple charts\n# are changed):\n# reno new common\nneutron:\n  - |\n    Neutron OVN now runs under **uWSGI**, and the legacy **eventlet** support\n    has been removed. The ``neutron-server`` binary is no longer provided.\n    Updated configuration references to reflect the switch to uWSGI\n    for Neutron OVN.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-42b77d74ce8fe287.yaml",
    "content": "---\nneutron:\n  - |\n    Update values_overrides for ovn. If OVN is used, network.backend needs to be ovn.\n    There is no need to keep openvswitch as it adds OVS plugin related pod dependencies.\n    daemonset_netns_cleanup_cron is also not required anymore with ovn.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-4f9263300df02c9b.yaml",
    "content": "---\nneutron:\n  - |\n    Add missing `--config-dir` option to `neutron-ovn-db-sync-util`.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-659f0c21af1feaa0.yaml",
    "content": "---\nfixes:\n  - |\n    Since 0e7fe77f49 neutron-ironic-agent has had an invalid volumes spec. Fix the\n    spec so the agent can run.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-670d4cd96f100dea.yaml",
    "content": "---\nfeatures:\n  - |\n    Split out the OpenStack service account definitions from neutron.conf and into\n    config snippets which are loaded at /etc/neutron/neutron.d/, which is automatically\n    loaded by OSLO when loading the main neutron.conf. This makes it easier for users\n    to use the regular config generation while supplying credentials out of band.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-96d95ffbdeaaf29a.yaml",
    "content": "---\nneutron:\n  - |\n    Add required OVN VPN configuration files to Neutron server so VPN\n    features behave as expected. The Neutron server receives RPC calls from the\n    Neutron OVN VPN agent and executes VPN operations. Therefore, the VPN\n    configuration must be present on the Neutron server.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-9dbb4250fd893743.yaml",
    "content": "---\nneutron:\n  - |\n    Fix ovn sync db cron job to use helm-toolkit snippets when\n    specifying runtime and priority classes.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-add-uwsgi-start-time-d73ba462e1157dd2.yaml",
    "content": "---\nneutron:\n  - |\n    Added uwsgi start-time configuration to neutron-api-uwsgi section. The\n    start-time parameter is set to \"%t\" (unix time at instance startup) and is\n    used by ML2/OVN for creating OVN hash ring registers per worker. This\n    configuration is mandatory for proper ML2/OVN operation. See the Neutron\n    documentation for more details:\n    https://docs.openstack.org/neutron/latest/admin/config-wsgi.html\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-b2247f89a5f258aa.yaml",
    "content": "---\n# To create a new release note related to a specific chart:\n# reno new <chart_name>\n#\n# To create a new release note for a common change (when multiple charts\n# are changed):\n# reno new common\nneutron:\n  - |\n    Add interface name parameter for DPDK configs\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-b225c11a5e1d522d.yaml",
    "content": "---\nneutron:\n  - |\n    Fix OVN support in neutron DHCP.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-c0c7ca4e49cbf03c.yaml",
    "content": "---\nneutron:\n  - |\n    Fix port duplication in neutron server deployment\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-c451a4129f97e891.yaml",
    "content": "---\nfixes:\n  - |\n    Fixed OVN metadata agent DaemonSet resource configuration inconsistency.\n    All containers now consistently reference pod.resources.agent.ovn_metadata.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-d39cc4643edac73c.yaml",
    "content": "---\nneutron:\n  - |\n    Support for tungstenfabric has been removed.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-f0674e08d80fc203.yaml",
    "content": "---\nneutron:\n  - |\n    Add new cron job for neutron ovn db sync that runs every 5 minutes by default.\n    This could be used as a log alert if any part is out of sync.\n    Or it can be used as an automatic repair method to prevent\n    the OVN DB from being modified and failing its purpose.\n    This cron job is disabled by default.\n    Set ``.Values.manifests.cron_job_ovn_db_sync_repair`` to\n    ``true`` to enable the cronjob.\n    The default sync mode for the cronjob only checks the sync status.\n    Set ``.Values.jobs.ovn_db_sync_repair.sync_mode`` to ``repair`` to enable\n    automatic repair and sync the OVN DB from the Neutron DB.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron-fca28403d7a0be3a.yaml",
    "content": "---\nneutron:\n  - |\n    When enabling the OVN backend, don't fail to start with the default\n    connection setup to OVN NB and SB. A feature change was made allowing users\n    to override these connections but it did not preserve the default.\n...\n"
  },
  {
    "path": "releasenotes/notes/neutron.yaml",
    "content": "---\nneutron:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 fixes tls issue\n  - 0.1.3 Update neutron to use Nginx apparmor profile\n  - 0.1.4 Pass ovs agent config to dhcp agent\n  - 0.1.5 Add missing flags to nginx container in neutron chart\n  - 0.1.6 Use HostToContainer mountPropagation\n  - 0.1.7 Change Issuer to ClusterIssuer\n  - 0.1.8 Revert Change Issuer to ClusterIssuer\n  - 0.1.9 Update ovs agent to support host/label overrides\n  - 0.1.10 Change Issuer to ClusterIssuer\n  - 0.1.11 Added the helm.sh/hook, helm.sh/hook-weight annotations\n  - 0.1.12 Removed \"name\" parameter from Rally tests\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Adding rabbitmq TLS logic\n  - 0.2.2 Use policies in yaml format\n  - 0.2.3 Mount rabbitmq TLS secret\n  - 0.2.4 Add Ussuri release support\n  - 0.2.5 Use rootwrap daemon\n  - 0.2.6 Fix neutron agent-init script\n  - 0.2.7 Made dnsmasq.conf overridable in configmap-bin\n  - 0.2.8 Add Victoria and Wallaby releases support\n  - 0.2.9 Add option to disable helm.sh/hook annotations\n  - 0.2.10 Update htk requirements repo\n  - 0.2.11 Improve health probe logging\n  - 0.2.12 Fix infinite recursion deadlock on netns cleanup cron\n  - 0.2.13 Enable taint toleration for Openstack services\n  - 0.2.14 Migrate IP from bridge for auto_bridge_add\n  - 0.2.15 Remove unsupported values overrides\n  - 0.2.16 Remove usage of six\n  - 0.2.17 Migrated PodDisruptionBudget resource to policy/v1 API version\n  - 0.2.18 Updated naming for subchart compatibility\n  - 0.2.19 Added qdhcp NS host validation for deleting wrong namespaces.\n  - 0.2.20 Add Xena and Yoga values overrides\n  - 0.2.21 Fix for qdhcp NS host validation for deleting wrong namespaces.\n  - 0.2.22 Fix /run/xtables.lock may be a directory\n  - 0.2.23 Add neutron_netns_cleanup_cron release image override, so that the respective release image is used\n  - 0.2.24 Added OCI registry 
authentication\n  - 0.2.25 Support TLS endpoints\n  - 0.2.26 Use HTTP probe instead of TCP probe\n  - 0.2.27 Distinguish between port number of internal endpoint and binding port number\n  - 0.3.0 Remove support for Train and Ussuri\n  - 0.3.1 Remove default policy rules\n  - 0.3.2 Use correct labels for ovs which uses one daemonset for ovs-db and ovs-vswitchd\n  - 0.3.3 Add OVN Support\n  - 0.3.4 Replace node-role.kubernetes.io/master with control-plane\n  - 0.3.5 Fix health probe for OVN metadata agent\n  - 0.3.6 Fix the issue that ovn metadata not work in muti-node enviroment\n  - 0.3.7 Sync neutron db to ovn nb db when neutron-server start\n  - 0.3.8 Define service_type in keystone_authtoken to support application credentials with access rules\n  - 0.3.9 Extend neutron liveness probe with readiness probe\n  - 0.3.10 Configure keystone authentication credentials for placement\n  - 0.3.11 Add Zed overrides\n  - 0.3.12 Update oslo_messaging_RPCClient and get_rpc_transport\n  - 0.3.13 Remove duplicated argument when running a liveness check\n  - 0.3.14 Add 2023.1 overrides\n  - 0.3.15 Add asap2 support\n  - 0.3.16 Use service tokens\n  - 0.3.17 Add exec probe timeouts\n  - 0.3.18 Improve OVN support\n  - 0.3.19 Fix getting IP for interface when there are multiple IPs assigned\n  - 0.3.20 Add Ubuntu Jammy overrides\n  - 0.3.21 Run native netns cleanup\n  - 0.3.22 Add BGP Dragent support for running dragent agents as daemonsets\n  - 0.3.23 Fix start function template\n  - 0.3.24 Add 2023.2 Ubuntu Jammy overrides\n  - 0.3.25 Fix ovs member support for readiness\n  - 0.3.26 Fix ovs options to allow multiple options\n  - 0.3.27 Move old overrides from the tools directory\n  - 0.3.28 Fix ovn for slow enviroment\n  - 0.3.29 Disable DVR for OVN floating ip\n  - 0.3.30 Fix designate auth url\n  - 0.3.31 FIX ovn-metadata-agent mountPropagation overrides by parent directory\n  - 0.3.32 Update dpdk override\n  - 0.3.33 Make sure trust on command is applied to avoid 
race-condition with ovs-dpdk\n  - 0.3.34 Update metadata endpoint\n  - 0.3.35 Do not attach non-existing interfaces to br-ex bridge for OVS agent\n  - 0.3.36 Enable custom annotations for Openstack pods\n  - 0.3.37 Proper chown /run/openvswitch/db.sock under OVN\n  - 0.3.38 Add 2024.1 overrides\n  - 0.3.39 Ensure that the script handles cases where the PID file exists but is empty or does not contain the expected data structure.\n  - 0.3.40 Fix ovs bridge creation in mappings for DPDK\n  - 0.3.41 Enable custom annotations for Openstack secrets\n  - 0.3.42 Update images used by default\n  - 0.3.43 Switch neutron to uWSGI\n  - 0.3.44 Add OVN VPNaas support\n  - 0.3.45 Fix ironic/baremetal authentication\n  - 0.3.46 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.3.47 Add service role to the Neutron user\n  - 0.3.48 Add 2024.2 Ubuntu Jammy overrides\n  - 0.3.49 Add node_selector_* for OVN VPN agent\n  - 0.3.50 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/nfs-provisioner.yaml",
    "content": "---\nnfs-provisioner:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Use full image ref for docker official images\n  - 0.1.3 Update htk requirements\n  - 0.1.4 Added OCI registry authentication\n  - 0.1.5 Update image version\n  - 0.1.6 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.7 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-1a7fb130b261f92d.yaml",
    "content": "---\nnova:\n  - |\n    Update Ceph to Tentacle 20.2.0 and replaced image sources from docker.io/openstackhelm with quay.io/airshipit\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-29572c7b62b6ae0b.yaml",
    "content": "---\nnova:\n  - Fix Cinder auth config in values.yaml\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-2c10ffbcf8d2f838.yaml",
    "content": "---\nnova:\n  - |\n    Support for tungstenfabric has been removed.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-2e97a6de46b4c9b9.yaml",
    "content": "---\nupgrade:\n  - |\n    Change the default volume v3 path to not include the tenant_id. This is the\n    current recommended approach and has not been necessary since the Yoga release.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-3493b35ba8c4479a.yaml",
    "content": "---\nnova:\n  - |\n    Update values_overrides for ovn. If OVN is used, network.backend needs to be ovn.\n    There is no need to keep openvswitch as it adds OVS plugin related pod dependencies.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-366b14dea33d416d.yaml",
    "content": "---\naodh:\n  - |\n    Remove outdated default kolla images and use quay.io/airshipit/aodh:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-467e6c34e9fd1b05.yaml",
    "content": "---\nnova:\n  - |\n    Fix issue with etcSources where resources fails to apply if the list is empty.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-476f40003a31bc77.yaml",
    "content": "---\nfeatures:\n  - |\n    Split out the OpenStack service account definitions from nova.conf and into\n    config snippets which are loaded at /etc/nova/nova.d/, which is automatically\n    loaded by OSLO when loading the main nova.conf. This makes it easier for users\n    to use the regular config generation while supplying credentials out of band.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-495c648112a2b539.yaml",
    "content": "---\nnova:\n  - |\n    Fix mount path /etc/nova/nova.conf.d for novncproxy deployment\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-4b998ef222e57fd1.yaml",
    "content": "---\nnova:\n  - |\n    Remove ``memcache_servers`` and ``memcache_secret_key`` from the ``[ironic]`` section.\n    It was found that nothing consumes those configurations.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-5bb93c130c2a280d.yaml",
    "content": "---\nnova:\n  - |\n    Update the wait computes default bootstrap script. Now it is going to print timestamps\n    while waiting for the computes to be ready. This is going to be handy during debugging.\n    Also a typo has been fixed that prevented the script from failing after timeout.\n  - |\n    Update dependencies for the bootstrap job. It must start after the cell setup job is finished\n    because it looks up hypervisors which are discovered by the cell setup job.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-5d7903f3b97aa088.yaml",
    "content": "---\nnova:\n  - |\n    Includes custom job annotations for nova-bootstrap chart. Other jobs\n    include the custom job annotations, but they were missing for the\n    nova-bootstrap job.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-60c926ac61319ba1.yaml",
    "content": "---\nnova:\n  - |\n    Add ability to disable nova-ssh Secret with ``.manifests.secret_ssh``.\nupgrade:\n  - |\n    The ``.manifests.configmap_etc`` value no longer controls the nova-ssh Secret,\n    use ``.manifests.secret_ssh`` instead.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-69cb1a01b6f5c561.yaml",
    "content": "---\nnova:\n  - |\n    Allows users to set custom 'host' values for nova conductor and scheduler\n    services. This may be useful for kubernetes users, where new deploys of\n    scheduler and conductor use the pod names, leaving stale compute service entries\n    which are eventually cleaned by the nova-service-cleaner job.\n    Ref: https://docs.openstack.org/nova/latest/configuration/config.html#DEFAULT.host\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-6b1d99fb5c67b2dd.yaml",
    "content": "---\n\nnova:\n  - |\n    health-probe.py now supports reading database connections from /etc/nova/nova.conf.d.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-7f3dbce1333752b8.yaml",
    "content": "---\nnova:\n  - |\n    Ensure that the nova chart always loads config overrides from /etc/nova/nova.conf.d\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-9df2dfa1e3521305.yaml",
    "content": "---\nnova:\n  - |\n    The ironic Keystone user for Nova will not be created anymore unless ``manifests.statefulset_compute_ironic`` is set to True.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-b0749b6144e2b871.yaml",
    "content": "---\nnova:\n  - Add custom annotations to the nova-cell-setup job\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-b2ce6bcc83029d1b.yaml",
    "content": "---\nnova:\n  - |\n    Refactored the flavor creation logic in the nova bootstrap script to simplify\n    and generalize flavor definitions using dynamic key/value iteration. This makes\n    the chart more maintainable and flexible for various flavor configurations.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-c59fc7469b3a8500.yaml",
    "content": "---\nnova:\n  - Add serialproxy support\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-dd4188dbc489977c.yaml",
    "content": "---\nupgrade:\n  - |\n    Change the default volume v2.1 path to not include the tenant_id. This is the\n    current recommended approach and has not been necessary since the Mitaka release.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-e42deac3199480e6.yaml",
    "content": "---\nnova:\n  - |\n    Add missing nova-etc-snippets for cell-setup cronjob.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-e8350419e59bc440.yaml",
    "content": "---\nnova:\n  - Adds support to UUID deployment-provisioning configuration\n...\n"
  },
  {
    "path": "releasenotes/notes/nova-fc00bda9bb69988e.yaml",
    "content": "---\nnova:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/nova/nova.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/nova.yaml",
    "content": "---\nnova:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Add ssh to Nova compute\n  - 0.1.3 Establish Nova and Placement dependencies\n  - 0.1.4 Remove deprecated os_region_name for placement\n  - 0.1.5 Enable hostIPC\n  - 0.1.6 Swap SSH key names to reflect the correct key\n  - 0.1.7 Use HostToContainer mountPropagation\n  - 0.1.8 Change Issuer to ClusterIssuer\n  - 0.1.9 Revert \"Change Issuer to ClusterIssuer\"\n  - 0.1.10 Use HostToContainer mount propagation\n  - 0.1.11 Secure libvirt connection from using 127.0.0.1 to use unix socket\n  - 0.1.12 Update RBAC apiVersion from /v1beta1 to /v1\n  - 0.1.13 Change Issuer to ClusterIssuer\n  - 0.1.14 BUG for deploying multiple compute nodes\n  - 0.1.15 Mount /dev/pts in Nova compute container\n  - 0.1.16 Use first IP address for migration\n  - 0.1.17 Add multipathd support for ISCSI backed volume VMs\n  - 0.1.18 Fix the nova-compute-ironic label issue\n  - 0.1.19 Host resource scale adjustment about ironic\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Remove unnecessary +x permission on gotpl files\n  - 0.2.2 Adding rabbitmq TLS logic\n  - 0.2.3 Replace deprecated configuration ``[vnc]/vncserver_proxyclient_address``\n  - 0.2.4 Mount rabbitmq TLS secret\n  - 0.2.5 Set reasonable default probe timeouts\n  - 0.2.6 Added cronJob with script for archive deleted rows which cleanup databases\n  - 0.2.7 Add Ussuri release support\n  - 0.2.8 Fix the cron archive_deleted_rows bash script for before and max-rows values\n  - 0.2.9 Add image clean up to rally test\n  - 0.2.10 Add tls cert mounting to nova-novnc\n  - 0.2.11 Add Victoria and Wallaby releases support\n  - 0.2.12 Bootstrap flavor creation efficiencies\n  - 0.2.13 Add missing 'runlock' hostMount when enable_scsi\n  - 0.2.14 Use helm.sh/hook annotations for jobs\n  - 0.2.15 Fix archive-deleted-rows for enabling date command as value for before option\n  - 0.2.16 Remove the policy 
document in values file\n  - 0.2.17 Fix disablement of helm.sh/hook for Helm v2\n  - 0.2.18 Give service time to restore\n  - 0.2.19 Define service cleaner sleep time\n  - 0.2.20 Update script to true of grep does get anything.\n  - 0.2.21 Helm 3 - Fix Job Labels\n  - 0.2.22 Update htk requirements repo\n  - 0.2.23 Add option to enable extra wait for cell-setup-init\n  - 0.2.24 Fix nova-bootstrap job labels\n  - 0.2.25 Add check for compute nodes\n  - 0.2.26 Fix _ssh-init.sh.tpl to copy the ssh keys to the user on the security context\n  - 0.2.27 Add tls1.2 minimum version to tls overrides\n  - 0.2.28 Move ssl_minimum_version to console section\n  - 0.2.29 Remove ssh-config\n  - 0.2.30 Improve health probe logging\n  - 0.2.31 Update oslo messaging get_transport\n  - 0.2.32 Host of ironic compute service equals pod name\n  - 0.2.33 Cleanup old releases\n  - 0.2.34 Remove consoleauth in nova\n  - 0.2.35 Enable taint toleration for Openstack services\n  - 0.2.36 Support TLS endpoints\n  - 0.2.37 Remove nova-placement\n  - 0.2.38 Update nova image defaults\n  - 0.2.39 Migrated CronJob resource to batch/v1 API version & PodDisruptionBudget to policy/v1\n  - 0.2.40 Updated naming for subchart compatibility\n  - 0.2.41 Add Xena and Yoga values overrides\n  - 0.2.42 Add missing configuration ``[vnc]/novncproxy_host``\n  - 0.2.43 Added OCI registry authentication\n  - 0.2.44 Distinguish between port number of internal endpoint and binding port number\n  - 0.2.45 Support TLS endpoints for metadata-api\n  - 0.2.46 Use HTTP probe instead of TCP probe\n  - 0.2.47 Remove list agents rally test\n  - 0.3.0 Remove support for Train and Ussuri\n  - 0.3.1 Added backoffLimit for bootstrap job\n  - 0.3.2 Remove un-used configs for Nova\n  - 0.3.3 Update all Ceph images to Focal\n  - 0.3.4 Add OVN values_override, disable dependency to ovn-agent and vif configs for ovn\n  - 0.3.5 Replace node-role.kubernetes.io/master with control-plane\n  - 0.3.6 Fix VNC access issues\n  - 0.3.7 Fix 
live migration without DNS resolution\n  - 0.3.8 Fix missing privilege separation directory for nova compute ssh\n  - 0.3.9 Fix typo in spice proxy deployment\n  - 0.3.10 Define service_type in keystone_authtoken to support application credentials with access rules\n  - 0.3.11 Update get_notification_transport\n  - 0.3.12 Update oslo_messaging_RPCClient\n  - 0.3.13 Add Zed overrides\n  - 0.3.14 Add 2023.1 overrides\n  - 0.3.15 Ensure that the health check script handles cases where the PID file exists but is empty or does not contain the expected data structure.\n  - 0.3.16 Use service tokens\n  - 0.3.17 Set targeted dependency of nova-compute with ovn networking backend\n  - 0.3.18 Fix nova ssh keys permission\n  - 0.3.19 Add support for enabling vencrypt\n  - 0.3.20 Add cinder auth config\n  - 0.3.21 Update health probe script considering ovsdb_connection config\n  - 0.3.22 Replace deprecated configuration vncserver_proxyclient_address to server_proxyclient_address\n  - 0.3.23 Add Ubuntu Jammy overrides\n  - 0.3.24 Create a certificate for novnc vencrypt separately\n  - 0.3.25 Add IP addresses search control flag\n  - 0.3.26 Improve cinder authentication support\n  - 0.3.27 Add 2023.2 Ubuntu Jammy overrides\n  - 0.3.28 Add ability to define extra command(s) for the nova cell setup job\n  - 0.3.29 Add ability to define extra command(s) for the nova service cleaner job\n  - 0.3.30 Add the conditional statement for log_config_append\n  - 0.3.31 Add getting LISTEN IP for CIDR\n  - 0.3.32 Set the startupProbe for nova-compute\n  - 0.3.33 Add job to create 'vms' pool\n  - 0.3.34 Add public endpoints for the spiceproxy\n  - 0.3.35 Use directory mount for vencrypt certificates\n  - 0.3.36 Update Ceph images to Jammy and Reef 18.2.1\n  - 0.3.37 Use metadata_listen_port instead of metadata_port\n  - 0.3.38 Using uWSGI\n  - 0.3.39 Enable custom annotations for Openstack pods\n  - 0.3.40 Add 2024.1 overrides\n  - 0.3.41 Enable custom annotations for Openstack secrets\n  - 
0.3.42 Update images used by default\n  - 0.3.43 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.3.44 Add DPDK overrides\n  - 0.3.45 Add configuration for nova-scheduler\n  - 0.3.46 Add 2024.2 Ubuntu Jammy overrides\n  - 0.3.47 Use nova-compute.conf in nova-compute-ironic\n  - 0.3.48 Fix typo in archive_deleted_rows script\n  - 0.3.49 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia-171c56de7891c86d.yaml",
    "content": "---\noctavia:\n  - |\n    Directly use module for Octavia API UWSGI as WSGI script octavia-wsgi has\n    removed from Octavia.\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia-3c13346818a743cc.yaml",
    "content": "---\noctavia:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/octavia/octavia.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia-63cb483419410e3c.yaml",
    "content": "---\noctavia:\n  - Fixes typos for driver agent mounts in helm templates\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia-73c0f7c8c13c00a1.yaml",
    "content": "---\noctavia:\n  - Run driver agent as a separate deployment on network nodes\n  - Run worker as a daemonset instead of deployment on network nodes\n  - |\n    Worker daemonset creates an interface attached to the\n    Octavia management network to get access to amphora instances\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia-74938cd9ffae016b.yaml",
    "content": "---\noctavia:\n  - |\n    Add support for etcSources to db-sync job.\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia-875ff6ae26e5586c.yaml",
    "content": "---\noctavia:\n  - |\n    Set octavia-health-manager pods to run on openstack-network-node\n    nodes in order to access the ovs socket and perform ovs functions.\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia-a9a696fde141cd8b.yaml",
    "content": "---\noctavia:\n  - |\n    Fix containerPort and readiness/liveness probe port of octavia-api.\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia-b40e89ec5e5b5568.yaml",
    "content": "---\noctavia:\n  - |\n    Adjust default container images to 2024.1 like other applications in OpenStack\n    Helm instead of an out of date container.\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia-c0e8011e138832db.yaml",
    "content": "---\noctavia:\n  - |\n    Move healthcheck endpoint path to the proper values section\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia-c952d2266d5dbd62.yaml",
    "content": "---\noctavia:\n  - |\n    Fixes octavia-api custom volume mounts inadvertently removed in\n    https://review.opendev.org/c/openstack/openstack-helm/+/953481\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia-c9f2b0ece7ba8406.yaml",
    "content": "---\noctavia:\n  - |\n    Allow for rbac customization of octavia policy.yaml by including\n    a policy.yaml file.\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia-d22c4az0a92b7d16.yaml",
    "content": "---\noctavia:\n  - |\n    Add helm hook annotations in jobs by default.\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia-f6afc93cf3ccc8f7.yaml",
    "content": "---\noctavia:\n  - |\n    Unhardcode readiness of octavia-api and add liveness probe. Also healthcheck of\n    octavia is enabled.\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia-health-manager-net-caps-49adc645e1d03456.yaml",
    "content": "---\n# To create a new release note related to a specific chart:\n# reno new <chart_name>\n#\n# To create a new release note for a common change (when multiple charts\n# are changed):\n# reno new common\noctavia:\n  - |\n    Health manager requires NET_RAW and NET_BIND_SERVICE for allowing ISC DHCPD to work\n...\n"
  },
  {
    "path": "releasenotes/notes/octavia.yaml",
    "content": "---\noctavia:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Added post-install and post-upgrade hook for Jobs\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Fix dnsPolicy for housekeeping service\n  - 0.2.2 Update htk requirements repo\n  - 0.2.3 Allow using log_config_append=null\n  - 0.2.4 Fix transport_url\n  - 0.2.5 Migrated PodDisruptionBudget resource to policy/v1 API version\n  - 0.2.6 Added OCI registry authentication\n  - 0.2.7 Use HTTP probe instead of TCP probe\n  - 0.2.8 Define service_type in keystone_authtoken to support application credentials with access rules\n  - 0.2.9 Use default timeout and retry configs for haproxy_amphora\n  - 0.2.10 Fix generating health_manager Role and RoleBinding\n  - 0.2.11 Uses uWSGI for API service\n  - 0.2.12 Enable custom annotations for Openstack pods\n  - 0.2.13 Enable custom annotations for Openstack secrets\n  - 0.2.14 Update images used by default\n  - 0.2.15 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.2.16 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/openstack.yaml",
    "content": "---\nopenstack:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Deploy compute-kit charts (neutron, nova, libvirt, openvswitch, placement)\n  - 0.1.2 Use host path storage for rabbitmq, so that rabbitmq retains data across pod recreation\n  - 0.1.3 Add neutron_netns_cleanup_cron release image override, so that the respective release image is used\n  - 0.1.4 Remove links in openstack/charts\n  - 0.1.5 Revert Remove links in openstack/charts\n  - 0.1.6 Added horizon to install as default component.\n  - 0.1.7 Remove placement db-migrate\n  - 0.2.0 Remove support for Train and Ussuri\n  - 0.2.1 Update all Ceph images to Focal\n  - 0.2.2 Add zed values override\n  - 0.2.3 Add Ubuntu Jammy overrides\n  - 0.2.4 Add 2023.1 overrides for Ubuntu Focal and Jammy\n  - 0.2.5 Add 2023.2 Ubuntu Jammy overrides\n  - 0.2.6 Update libvirt overrides for 2023.1 and 2023.2\n  - 0.2.7 Update Ceph images to Jammy and Reef 18.2.1\n  - 0.2.8 Add 2024.1 overrides\n  - 0.2.9 Add 2024.1 overrides\n  - 0.2.10 Add 2024.1 overrides\n  - 0.2.11 Add 2024.1 overrides\n  - 0.2.12 Add 2024.1 overrides\n  - 0.2.13 Add 2024.1 overrides\n  - 0.2.14 Add 2024.1 overrides\n  - 0.2.15 Add 2024.1 overrides\n  - 0.2.16 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.2.17 Update default values to work on multinode cluster\n  - 0.2.18 Add 2024.2 Ubuntu Jammy overrides\n  - 0.2.19 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/openvswitch-0b37403ffc75bb63.yaml",
    "content": "---\nopenvswitch:\n  - Change Open vSwitch to run with non-root user\n...\n"
  },
  {
    "path": "releasenotes/notes/openvswitch-3401ba2f0dc8e1f6.yaml",
    "content": "---\nopenvswitch:\n  - |\n    Introduce .Values.openvswitch.extraContainers hook to make it possible to\n    add extra containers to openvswitch daemonset, e.g. for monitoring\n    purposes.\n...\n"
  },
  {
    "path": "releasenotes/notes/openvswitch-3df8c5ca6034009f.yaml",
    "content": "---\nopenvswitch:\n  - |\n    Add missing priority class and runtime class definition for openvswitch\n...\n"
  },
  {
    "path": "releasenotes/notes/openvswitch-5c0d74ca4f420e56.yaml",
    "content": "---\nopenvswitch:\n  - Set nova user as owner for hugepages mount path\n...\n"
  },
  {
    "path": "releasenotes/notes/openvswitch-63f74f08815529dd.yaml",
    "content": "---\nfeatures:\n  - |\n    Add ability to override cpuset.mems and cpuset.cpus for osh-openvswitch\n    cgroup.\n...\n"
  },
  {
    "path": "releasenotes/notes/openvswitch-c123b289b476575a.yaml",
    "content": "---\nopenvswitch:\n  - Use quay.io/airshipit/openvswitch:latest-ubuntu_noble images by default.\n...\n"
  },
  {
    "path": "releasenotes/notes/openvswitch-e761d6733b84bdc7.yaml",
    "content": "---\nopenvswitch:\n  - Make the --user flag for OVS server optional\n...\n"
  },
  {
    "path": "releasenotes/notes/openvswitch-e888d02378d4d044.yaml",
    "content": "---\nopenvswitch:\n  - |\n    Added /var/tmp mount to ovs-vswitchd container.\n    This change ensures proper operation when using DPDK with the Mellanox driver.\n...\n"
  },
  {
    "path": "releasenotes/notes/openvswitch.yaml",
    "content": "---\nopenvswitch:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Unpin images built with osh-images\n  - 0.1.3 Use HostToContainer mountPropagation\n  - 0.1.4 Support override of vswitchd liveness and readiness probe\n  - 0.1.5 Use full image ref for docker official images\n  - 0.1.6 Update htk requirements\n  - 0.1.7 Enable taint toleration for Openstack services jobs\n  - 0.1.8 Added OCI registry authentication\n  - 0.1.9 Enable ovs hardware offload\n  - 0.1.10 Merge ovs-db and ovs-vswitchd in one Daemonset\n  - 0.1.11 Add ovn.yaml in values_override, Enable ptcp_port 6640 which needed when use ovn\n  - 0.1.12 Replace node-role.kubernetes.io/master with control-plane\n  - 0.1.13 Upgrade openvswitch image to latest-ubuntu_focal to fix qos issue\n  - 0.1.14 Add buffer before accesses pid file\n  - 0.1.15 Add buffer before accesses ovs controller pid socket\n  - 0.1.16 Restore ServiceAccount to openvswitch pod\n  - 0.1.17 Add buffer to wait for potential new CTL file before running chown\n  - 0.1.18 Add value for extra poststart command\n  - 0.1.19 Add check for cgroups v2 file structure\n  - 0.1.20 Add Ubuntu Focal and Ubuntu Jammy overrides\n  - 0.1.21 Add overrides for dpdk\n  - 0.1.22 Change hugepages size to 2M for easier configuration\n  - 0.1.23 Fix rolebinding for init container\n  - 0.1.24 Change ovs to run as child process of start script\n  - 0.1.25 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.26 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/other-23a753cb53b10bb8.yaml",
    "content": "---\nother:\n  - |\n    Use Loci images by default in all charts. Loci builds\n    images using periodic pipeline and publishes them to\n    Docker Hub registry and to Quay registry which has\n    more tolerant rate limits which is more convenient for\n    users with anonymous accounts.\n...\n"
  },
  {
    "path": "releasenotes/notes/ovn-3b9e82e5d469bc98.yaml",
    "content": "---\nfeatures:\n  - Implement daemonset overrides\n...\n"
  },
  {
    "path": "releasenotes/notes/ovn-3e576a7be97232fe.yaml",
    "content": "---\novn:\n  - |\n    Update system id annotation key. This is to align with the\n    openstack-helm-infra repo retirement.\n...\n"
  },
  {
    "path": "releasenotes/notes/ovn-50ba6d3611decff9.yaml",
    "content": "---\novn:\n  - Add OVN Kubernetes support\n...\n"
  },
  {
    "path": "releasenotes/notes/ovn-53e7ddb42d51e7c9.yaml",
    "content": "---\novn:\n  - |\n    Add missing apiVersion and kind to volumeClaimTemplates for ovn-ovsdb-nb and ovn-ovsdb-sb StatefulSet.\n...\n"
  },
  {
    "path": "releasenotes/notes/ovn-6c1c8afff28cf7f7.yaml",
    "content": "---\novn:\n  - |\n    This change introduces the use of a PersistentVolumeClaim (PVC) mounted at /var/lib/ovn\n    to store OVN database files (ovnnb_db.db and ovnsb_db.db). Previously, the OVN databases\n    were stored on ephemeral pod storage, which caused the databases to be lost whenever the\n    OVN pods were restarted. This resulted in network outages and forced a full synchronization\n    between the Neutron database and OVN, impacting cluster stability.\n    The issue was introduced by commit ffd183a164be190afcc2ce4de27de7e72ab8d386, which caused\n    problems during OVN upgrades due to the lack of persistent storage.\n    By storing the OVN databases on a persistent volume, pod restarts and upgrades can be\n    performed safely without data loss or network disruption.\n...\n"
  },
  {
    "path": "releasenotes/notes/ovn-73332b0bc5d647f2.yaml",
    "content": "---\novn:\n  - |\n    Add support for overriding OVN environment variables.\n    OVN relies on environment variables to control the behavior of its services.\n    This allowing users to override these variables enables finer tuning of OVN\n    services when needed.\n...\n"
  },
  {
    "path": "releasenotes/notes/ovn-8b5cc103886f3b25.yaml",
    "content": "---\novn:\n  - |\n    Add support for ovn_monitor_all.\n...\n"
  },
  {
    "path": "releasenotes/notes/ovn-a82eced671495a3d.yaml",
    "content": "---\novn:\n  - Add OVN network logging parser\n...\n"
  },
  {
    "path": "releasenotes/notes/ovn-b172c29d8c0602b1.yaml",
    "content": "---\novn:\n  - |\n    Update Ceph to Tentacle 20.2.0 and replaced image sources from docker.io/openstackhelm with quay.io/airshipit\n...\n"
  },
  {
    "path": "releasenotes/notes/ovn-d195851d81d68036.yaml",
    "content": "---\novn:\n  - |\n    Add a missing support for oci_image_registry.\n...\n"
  },
  {
    "path": "releasenotes/notes/ovn-ffd84bb8c9e73b64.yaml",
    "content": "---\n# To create a new release note related to a specific chart:\n# reno new <chart_name>\n#\n# To create a new release note for a common change (when multiple charts\n# are changed):\n# reno new common\novn:\n  - |\n    Use `ip add replace` instead of `ip addr add` in the init script to\n    make it idempotent and avoid errors when the script tries to assign\n    the same IP address to the interface on subsequent runs.\n...\n"
  },
  {
    "path": "releasenotes/notes/ovn.yaml",
    "content": "---\novn:\n  - 0.1.0 Add OVN!\n  - 0.1.1 Fix ovn db persistence issue\n  - 0.1.2 Add bridge-mapping configuration\n  - 0.1.3 Fix system-id reuse\n  - 0.1.4 Add support for OVN HA + refactor\n  - 0.1.5 Add ubuntu_focal and ubuntu_jammy overrides\n  - 0.1.6 Fix ovsdb port number\n  - 0.1.7 Use host network for ovn controller pods\n  - 0.1.8 Fix attaching interfaces to the bridge\n  - 0.1.9 Make ovn db file path as configurable\n  - 0.1.10 Fix typo in the controller init script\n  - 0.1.11 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.12 Fix oci_image_registry secret name\n  - 0.1.13 Allow share OVN DB NB/SB socket\n  - 0.1.14 Make the label for OVN controller gateway configurable\n  - 0.1.15 Fix resources\n  - 0.1.16 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/panko.yaml",
    "content": "---\npanko:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.2.0 Remove support for releases before T\n  - 1.0.0 Removed due to retirement\n...\n"
  },
  {
    "path": "releasenotes/notes/placement-2b023904bc06028b.yaml",
    "content": "---\nplacement:\n  - |\n    Split out the OpenStack service account definitions from placement.conf and into\n    config snippets which are loaded at /etc/placement/placement.d/, which is automatically\n    loaded by OSLO when loading the main placement.conf. This makes it easier for users\n    to use the regular config generation while supplying credentials out of band.\n...\n"
  },
  {
    "path": "releasenotes/notes/placement-3115f3ce4c0801af.yaml",
    "content": "---\nplacement:\n  - |\n    Add support for etcSources to db-sync job.\n...\n"
  },
  {
    "path": "releasenotes/notes/placement-a180f4e88ed81d30.yaml",
    "content": "---\nplacement:\n  - |\n    Allow users to add additional sources to the Projected Volume that is mounted\n    at /etc/placement/placement.conf.d/ so they may more easily override configs or provide\n    additional configs for the various services in the chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/placement.yaml",
    "content": "---\nplacement:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Establish Nova/Placement dependencies\n  - 0.1.3 Use proper default placement image\n  - 0.1.4 Add null check condition in placement deployment manifest\n  - 0.1.5 Change Issuer to ClusterIssuer\n  - 0.1.6 Revert - Change Issuer to ClusterIssuer\n  - 0.1.7 Change Issuer to ClusterIssuer\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Add Ussuri release support\n  - 0.2.2 Add Victoria and Wallaby releases support\n  - 0.2.3 Added helm.sh/hook annotations for Jobs\n  - 0.2.4 Helm 3 - Fix Job Labels\n  - 0.2.5 Update htk requirements repo\n  - 0.2.6 Enable taint toleration for Openstack services\n  - 0.2.7 Add helm hook annotations for db-sync job\n  - 0.2.8 Migrated PodDisruptionBudget resource to policy/v1 API version\n  - 0.2.9 Add Xena and Yoga values overrides\n  - 0.2.10 Added OCI registry authentication\n  - 0.2.11 Distinguish between port number of internal endpoint and binding port number\n  - 0.2.12 Use HTTP probe instead of TCP probe\n  - 0.2.13 Support TLS endpoints\n  - 0.3.0 Remove placement-migrate\n  - 0.3.1 Remove support for Train and Ussuri\n  - 0.3.2 Remove default policy rules\n  - 0.3.3 Replace node-role.kubernetes.io/master with control-plane\n  - 0.3.4 Define service_type in keystone_authtoken to support application credentials with access rules\n  - 0.3.5 Add Zed overrides\n  - 0.3.6 Add 2023.1 overrides\n  - 0.3.7 Use service tokens\n  - 0.3.8 Add Ubuntu Jammy overrides\n  - 0.3.9 Add 2023.2 Ubuntu Jammy overrides\n  - 0.3.10 Add log_dir option for placement\n  - 0.3.11 Enable custom annotations for Openstack pods\n  - 0.3.12 Add 2024.1 overrides\n  - 0.3.13 Enable custom annotations for Openstack secrets\n  - 0.3.14 Update images used by default\n  - 0.3.15 Uses uWSGI for API service\n  - 0.3.16 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.3.17 Add 2024.2 Ubuntu 
Jammy overrides\n  - 0.3.18 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/podsecuritypolicy.yaml",
    "content": "---\npodsecuritypolicy:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Update htk requirements\n  - 1.0.0 Remove chart due to PodSecurityPolicy deprecation\n...\n"
  },
  {
    "path": "releasenotes/notes/postgresql-4ee4e72706f17d8a.yaml",
    "content": "---\nfixes:\n  - |\n    Added failover openrc environment variables to database backup cron jobs.\n...\n"
  },
  {
    "path": "releasenotes/notes/postgresql-e1a02dbbe6601b0f.yaml",
    "content": "---\npostgresql:\n  - Add support of hostPath volume for archive mode\n  - Use DirectoryOrCreate type for hostPath volumes\n...\n"
  },
  {
    "path": "releasenotes/notes/postgresql.yaml",
    "content": "---\npostgresql:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 adding archiving to postgres\n  - 0.1.3 Use explicit entrypoint for prometheus exporter\n  - 0.1.4 Allow probe tweaking\n  - 0.1.5 Optimize restart behavior\n  - 0.1.6 Revert \"Add default reject rule ...\"\n  - 0.1.7 postgres archive cleanup script\n  - 0.1.8 Add tls to Postgresql\n  - 0.1.9 Use full image ref for docker official images\n  - 0.1.10 Helm 3 - Fix Job labels\n  - 0.1.11 Update htk requirements\n  - 0.1.12 Enhance postgresql backup\n  - 0.1.13 Remove set -x\n  - 0.1.14 Fix invalid fields in values\n  - 0.1.15 Migrated CronJob resource to batch/v1 API version\n  - 0.1.16 Added OCI registry authentication\n  - 0.1.17 Added empty verify_databases_backup_archives() function implementation to match updated backup_databases() function in helm-toolkit\n  - 0.1.18 Updated postgres to 14.5 and replaced deprecated config item wal_keep_segments with wal_keep_size\n  - 0.1.19 Added staggered backups support\n  - 0.1.20 Added throttling remote backups\n  - 0.1.21 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.22 Update default images tags. Add 2024.1-ubuntu_jammy overrides.\n  - 0.1.23 Add 2024.2 overrides\n  - 0.1.24 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/powerdns.yaml",
    "content": "---\npowerdns:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Use full image ref for docker official images\n  - 0.1.3 Helm 3 - Fix Job labels\n  - 0.1.4 Update htk requirements\n  - 0.1.5 Update default image values\n  - 0.1.6 Added OCI registry authentication\n  - 0.1.7 Add 2023.1 Ubuntu Focal overrides\n  - 0.1.8 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.9 Add 2024.1 Ubuntu Jammy overrides\n  - 0.1.10 Add 2024.2 overrides\n  - 0.1.11 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/prometeus-alertmanager-293616e8a47a12e8.yaml",
    "content": "---\nprometheus-alertmanager:\n  - Add support of hostPath volume\n...\n"
  },
  {
    "path": "releasenotes/notes/prometeus-e9a80f262470313c.yaml",
    "content": "---\nprometheus:\n  - Add support of hostPath volume\n...\n"
  },
  {
    "path": "releasenotes/notes/prometheus-alertmanager.yaml",
    "content": "---\nprometheus-alertmanager:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Add extensible command line flags to Alertmanager\n  - 0.1.3 Add LDAP to Alertmanager\n  - 0.1.4 Remove snmp_notifier subchart from alertmanager\n  - 0.1.5 Add Prometheus Scrape Annotation\n  - 0.1.6 Remove Alerta from openstack-helm-infra repository\n  - 0.1.7 Use full image ref for docker official images\n  - 0.1.8 Update htk requirements\n  - 0.1.9 Added OCI registry authentication\n  - 0.1.10 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.11 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/prometheus-blackbox-exporter.yaml",
    "content": "---\nprometheus-blackbox-exporter:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Rename image key name\n  - 0.1.3 Update htk requirements\n  - 0.1.4 Fix indentation\n  - 0.1.5 Added OCI registry authentication\n  - 0.1.6 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/prometheus-kube-state-metrics-b1fc3bf8e9109ae4.yaml",
    "content": "---\nprometheus-kube-state-metrics:\n  - Update kube-state-metrics image\n...\n"
  },
  {
    "path": "releasenotes/notes/prometheus-kube-state-metrics.yaml",
    "content": "---\nprometheus-kube-state-metrics:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Update to make current\n  - 0.1.3 Update image version from v2.0.0-alpha to v2.0.0-alpha-1\n  - 0.1.4 Use full image ref for docker official images\n  - 0.1.5 Fix helm3 compatability\n  - 0.1.6 Update htk requirements\n  - 0.1.7 Added OCI registry authentication\n  - 0.1.8 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.9 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/prometheus-mysql-exporter.yaml",
    "content": "---\nprometheus-mysql-exporter:\n  - 0.0.1 Initial Chart\n  - 0.0.2 Add 2024.1 overrides\n  - 0.0.3 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.0.4 Fix typo in the values_overrides directory name\n  - 0.0.5 Add 2024.2 overrides\n  - 0.0.6 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/prometheus-node-exporter.yaml",
    "content": "---\nprometheus-node-exporter:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Add possibility to use overrides for some charts\n  - 0.1.3 Use full image ref for docker official images\n  - 0.1.4 Update htk requirements\n  - 0.1.5 Added OCI registry authentication\n  - 0.1.6 Replace node-role.kubernetes.io/master with control-plane\n  - 0.1.7 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.8 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/prometheus-openstack-exporter-39b2a7f52552033d.yaml",
    "content": "---\nprometheus-openstack-exporter:\n  - Upgrade openstack-exporter to the latest v1.7.0.\n...\n"
  },
  {
    "path": "releasenotes/notes/prometheus-openstack-exporter-d95d286faa68ea98.yaml",
    "content": "---\nprometheus-openstack-exporter:\n  - Swap to official openstack exporter image.\n...\n"
  },
  {
    "path": "releasenotes/notes/prometheus-openstack-exporter.yaml",
    "content": "---\nprometheus-openstack-exporter:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Unpin prometheus-openstack-exporter image\n  - 0.1.3 Add possibility to use overrides for some charts\n  - 0.1.4 Use full image ref for docker official images\n  - 0.1.5 Helm 3 - Fix Job labels\n  - 0.1.6 Update htk requirements\n  - 0.1.7 Added OCI registry authentication\n  - 0.1.8 Switch to jammy-based images\n  - 0.1.9 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.10 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/prometheus-process-exporter.yaml",
    "content": "---\nprometheus-process-exporter:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Fix values_overrides directory naming\n  - 0.1.3 Use full image ref for docker official images\n  - 0.1.4 Update htk requirements\n  - 0.1.5 Added OCI registry authentication\n  - 0.1.6 Replace node-role.kubernetes.io/master with control-plane\n  - 0.1.7 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.8 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/prometheus.yaml",
    "content": "---\nprometheus:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Add configurable readiness/liveness Probes\n  - 0.1.3 Revert \"Render Rules as Templates\"\n  - 0.1.4 Fix spacing inconsistencies with flags\n  - 0.1.5 Fix spacing inconsistencies with flags\n  - 0.1.6 Upgrade version to v2.25 fix/remove deprecated flags\n  - 0.1.7 Enable TLS for Prometheus\n  - 0.1.8 Change readiness probe from /status to /-/ready\n  - 0.1.9 Retrieve backend port name from values.yaml\n  - 0.1.10 Use full image ref for docker official images\n  - 0.1.11 Update htk requirements\n  - 0.1.12 Update default image value to Wallaby\n  - 0.1.13 Added OCI registry authentication\n  - 0.1.14 Added feature to launch Prometheus with custom script\n  - 0.1.15 Add 2023.1 Ubuntu Focal overrides\n  - 0.1.16 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.17 Add 2024.1 Ubuntu Jammy overrides\n  - 0.1.18 Add 2024.2 overrides\n  - 0.1.19 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/rabbitmq-04d68343d1f9dbec.yaml",
    "content": "---\nrabbitmq:\n  - |\n    Removing rabbitmq exporter due to EOL. Converted to use\n    built-in metrics instead.\n...\n"
  },
  {
    "path": "releasenotes/notes/rabbitmq.yaml",
    "content": "---\nrabbitmq:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 changes rmq-exporter secret src\n  - 0.1.4 Add configurable RABBIT_TIMEOUT parameter\n  - 0.1.5 Update Rabbitmq exporter version\n  - 0.1.6 Disallow privilege escalation in rabbitmq server container\n  - 0.1.7 Adding TLS logic to rabbitmq\n  - 0.1.8 Make helm test work with TLS\n  - 0.1.9 Use full image ref for docker official images\n  - 0.1.10 Set separate for HTTPS\n  - 0.1.11 Add TLS support for helm test\n  - 0.1.12 Added helm hook post-install and post-upgrade for rabbitmq wait cluster job\n  - 0.1.13 Add prestop action and version 3.8.x upgrade prep\n  - 0.1.14 Update readiness and liveness probes\n  - 0.1.15 Update htk requirements\n  - 0.1.16 Add force_boot command to rabbit start template\n  - 0.1.17 Updated naming for subchart compatibility\n  - 0.1.18 Revert naming for subchart compatibility\n  - 0.1.19 Enable taint toleration for Openstack services jobs\n  - 0.1.20 Bump Rabbitmq version to 3.9.0\n  - 0.1.21 Updated naming for subchart compatibility\n  - 0.1.22 Remove guest admin account\n  - 0.1.23 Fixed guest account removal\n  - 0.1.24 Added OCI registry authentication\n  - 0.1.25 Add hostPort support\n  - 0.1.26 Moved guest admin removal to init template\n  - 0.1.27 Replace node-role.kubernetes.io/master with control-plane\n  - 0.1.28 Add IPv6 environment support for rabbitmq\n  - 0.1.29 Add build-in prometheus plugin and disable external exporter\n  - 0.1.30 Add labels to rabbitmq service\n  - 0.1.31 Support management api metrics collection\n  - 0.1.32 Enable addition of default consumer prefetch count\n  - 0.1.33 Bump RabbitMQ image version to 3.13.0\n  - 0.1.34 Add 2024.1 overrides\n  - 0.1.35 Add configurable probes to rabbitmq container\n  - 0.1.36 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.37 Update rabbitmq readiness/liveness command\n  - 0.1.38 Do not use hardcoded 
username in rabbitmq chown container\n  - 0.1.39 Allow to bootstrap rabbitmq with initial config\n  - 0.1.40 Set password for guest user rabbitmq\n  - 0.1.41 Use short rabbitmq node name\n  - 0.1.42 Revert Use short rabbitmq node name\n  - 0.1.43 Add 2024.2 overrides\n  - 0.1.44 Allow to use default storage class\n  - 0.1.45 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/rally.yaml",
    "content": "---\nrally:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Update htk requirements repo\n  - 0.2.2 Add values for backoffLimit and restartPolicy\n  - 0.2.3 Update default image values to Wallaby\n  - 0.2.4 Migrated PodDisruptionBudget resource to policy/v1 API version\n  - 0.2.5 Add helm hook for jobs\n  - 0.2.6 Added OCI registry authentication\n  - 0.2.7 Support TLS for identity endpoint\n  - 0.2.8 Bump Cirros version to 0.6.2\n  - 0.2.9 Enable custom annotations for Openstack secrets\n  - 0.2.10 Update images used by default\n  - 0.2.11 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.2.12 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/redis.yaml",
    "content": "---\nredis:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Use full image ref for docker official images\n  - 0.1.3 Update htk requirements\n  - 0.1.4 Added OCI registry authentication\n  - 0.1.5 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.6 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/registry-daf63a0fbe9771cb.yaml",
    "content": "---\nregistry:\n  - Update docker client image to 29\n  - |\n    Use quay.io/airshipit/keystone-entrypoint:latest-ubuntu_jammy\n    image by default for init containers\n...\n"
  },
  {
    "path": "releasenotes/notes/registry.yaml",
    "content": "---\nregistry:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.1.2 Update to container image repo k8s.gcr.io\n  - 0.1.3 Use full image ref for docker official images\n  - 0.1.4 Helm 3 - Fix Job labels\n  - 0.1.5 Update htk requirements\n  - 0.1.6 Added OCI registry authentication\n  - 0.1.7 Update kubernetes registry to registry.k8s.io\n  - 0.1.8 Update bootstrap image url for newer image format\n  - 0.1.9 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.10 Allow to use default storage class\n  - 0.1.11 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/remove-legacy-volume-apis-12d2d1abbb7fbe61.yaml",
    "content": "---\ncinder:\n  - |\n    Removed endpoints for v1 API and v2 API because these were already removed\n    from cinder.\n...\n"
  },
  {
    "path": "releasenotes/notes/remove-share-v1-api-d7b0b85e395bf131.yaml",
    "content": "---\nmanila:\n  - |\n    Removed endpoint for the legacy share API, becuase it has been removed from\n    manila.\n...\n"
  },
  {
    "path": "releasenotes/notes/rename-ceph-rbd-pool-app-name.yaml",
    "content": "---\nother:\n  - |\n    rbd_pool_app_name is a Ceph pool attribute. Moving it from conf.software.rbd to\n    conf.ceph.pools as app_name. This means that conf.software.rbd.rbd_pool_app_name\n    is now conf.ceph.pools.cinder.volumes.app_name and conf.software.rbd.rbd_pool_app_name_backup\n    is now conf.ceph.pools.backup.app_name.\n...\n"
  },
  {
    "path": "releasenotes/notes/skyline-0cc4caaea4f05714.yaml",
    "content": "---\nskyline:\n  - |\n    Adding a missing oci_image_registry endpoint and secret in the Skyline Helm\n    chart.\n...\n"
  },
  {
    "path": "releasenotes/notes/skyline-4763b3a9c14ace98.yaml",
    "content": "---\nskyline:\n  - |\n    Bring out database migrations and nginx config generation\n    to separate scripts so that they can be run independently\n    during init container phase.\n  - |\n    Use Loci Skyline image by default.\n...\n"
  },
  {
    "path": "releasenotes/notes/skyline-794e9be9cc48f98d.yaml",
    "content": "---\nskyline:\n  - |\n    Initial release of the Skyline chart\n...\n"
  },
  {
    "path": "releasenotes/notes/skyline-db-sync-image-b56ba0a4cad85c9c.yaml",
    "content": "---\nskyline:\n  - |\n    Fix db-sync container image tag variable to match the style of all other\n    charts to be prefixed with the project name.\n...\n"
  },
  {
    "path": "releasenotes/notes/skyline-de744253bec9dfa3.yaml",
    "content": "---\nfixes:\n  - |\n    Mount Skyline apiserver socket directory to the nginx container.\n\n    The Skyline pods have two containers: nginx and skyline apiserver.\n    The nginx container needs access to the apiserver socket to proxy\n    requests to it.\n...\n"
  },
  {
    "path": "releasenotes/notes/tacker.yaml",
    "content": "---\ntacker:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Add Ubuntu Jammy overrides\n  - 0.1.2 Add 2023.2 Ubuntu Jammy overrides\n  - 0.1.3 Add 2024.1 overrides\n  - 0.1.4 Enable custom annotations for Openstack secrets\n  - 0.1.5 Update images used by default\n  - 0.1.6 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.1.7 Add Tacker Test Job\n  - 0.1.8 Add 2024.2 Ubuntu Jammy overrides\n  - 0.1.9 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/tempest.yaml",
    "content": "---\ntempest:\n  - 0.1.0 Initial Chart\n  - 0.1.1 Change helm-toolkit dependency version to \">= 0.1.0\"\n  - 0.2.0 Remove support for releases before T\n  - 0.2.1 Update htk requirements repo\n  - 0.2.2 Add helm hook for ks job\n  - 0.2.3 Fix logging config\n  - 0.2.4 Update default image values to Wallaby\n  - 0.2.5 Added OCI registry authentication\n  - 0.2.6 Support SSL openstack endpoints\n  - 0.2.7 Add configuration for heat-tempest-plugin\n  - 0.2.8 Bump Cirros version to 0.6.2\n  - 0.2.9 Enable custom annotations for Openstack secrets\n  - 0.2.10 Update images used by default\n  - 0.2.11 Use quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal by default\n  - 0.2.12 Update Chart.yaml apiVersion to v2\n  - 2024.2.0 Update version to align with the Openstack release cycle\n...\n"
  },
  {
    "path": "releasenotes/notes/trove-a1b2c3d4e5f6g7h8.yaml",
    "content": "---\nfeatures:\n  - |\n    Added configuration for required OpenStack service endpoints (Nova, Neutron,\n    Cinder, and Glance) in the Trove Helm chart.\n  - |\n    Added service credential definitions to support integration with dependent\n    OpenStack services.\n...\n"
  },
  {
    "path": "releasenotes/notes/trove.yaml",
    "content": "---\ntrove:\n  - Initial Chart - OpenStack Database as a Service (DBaaS)\n...\n"
  },
  {
    "path": "releasenotes/notes/watcher.yaml",
    "content": "---\nwatcher:\n  - 0.1.0 Initial Chart\n...\n"
  },
  {
    "path": "releasenotes/notes/zaqar-e43f9b2ace992d92.yaml",
    "content": "---\nzaqar:\n  - |\n    Added initial support for OpenStack Zaqar messaging service deployment\n    in Kubernetes environments through Helm charts. This enables users to\n    deploy and manage Zaqar services alongside other OpenStack components.\n\nfeatures:\n  - |\n    Introduced Zaqar Helm chart with support for:\n    - Zaqar API service deployment and configuration\n    - Support for HTTP-based RESTful API and WebSocket messaging\n    - Integration with existing OpenStack identity services (Keystone)\n    - Support for custom Zaqar configuration through values.yaml\n...\n"
  },
  {
    "path": "releasenotes/requirements.txt",
    "content": "# The order of packages is significant, because pip processes them in the order\n# of appearance. Changing the order has an impact on the overall integration\n# process, which may cause wedges in the gate later.\n\nsphinx>=2.0.0,!=2.1.0 # BSD\nopenstackdocstheme>=2.2.1 # Apache-2.0\nreno>=3.1.0 # Apache-2.0\n"
  },
  {
    "path": "releasenotes/source/conf.py",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n    'openstackdocstheme',\n    'reno.sphinxext',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# -- Options for Internationalization output ------------------------------\nlocale_dirs = ['locale/']\n"
  },
  {
    "path": "releasenotes/source/current.rst",
    "content": "==============================\n Current Series Release Notes\n==============================\n\n.. release-notes::\n"
  },
  {
    "path": "releasenotes/source/index.rst",
    "content": "=============================\n OpenStack-Helm Release Notes\n=============================\n\n.. toctree::\n   :maxdepth: 1\n\n   current\n"
  },
  {
    "path": "releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po",
    "content": "# Andi Chandler <andi@gowling.com>, 2021. #zanata\n# Andi Chandler <andi@gowling.com>, 2022. #zanata\n# Andi Chandler <andi@gowling.com>, 2023. #zanata\nmsgid \"\"\nmsgstr \"\"\n\"Project-Id-Version: Python\\n\"\n\"Report-Msgid-Bugs-To: \\n\"\n\"POT-Creation-Date: 2023-02-21 00:39+0000\\n\"\n\"MIME-Version: 1.0\\n\"\n\"Content-Type: text/plain; charset=UTF-8\\n\"\n\"Content-Transfer-Encoding: 8bit\\n\"\n\"PO-Revision-Date: 2023-02-03 04:48+0000\\n\"\n\"Last-Translator: Andi Chandler <andi@gowling.com>\\n\"\n\"Language-Team: English (United Kingdom)\\n\"\n\"Language: en_GB\\n\"\n\"X-Generator: Zanata 4.3.3\\n\"\n\"Plural-Forms: nplurals=2; plural=(n != 1)\\n\"\n\nmsgid \"0.1.0 Initial Chart\"\nmsgstr \"0.1.0 Initial Chart\"\n\nmsgid \"0.1.1 Change helm-toolkit dependency to >= 0.1.0\"\nmsgstr \"0.1.1 Change helm-toolkit dependency to >= 0.1.0\"\n\nmsgid \"0.1.1 Change helm-toolkit dependency version to \\\">= 0.1.0\\\"\"\nmsgstr \"0.1.1 Change helm-toolkit dependency version to \\\">= 0.1.0\\\"\"\n\nmsgid \"0.1.1 UPDATE\"\nmsgstr \"0.1.1 UPDATE\"\n\nmsgid \"0.1.10 Change Issuer to ClusterIssuer\"\nmsgstr \"0.1.10 Change Issuer to ClusterIssuer\"\n\nmsgid \"0.1.10 Update RBAC apiVersion from /v1beta1 to /v1\"\nmsgstr \"0.1.10 Update RBAC apiVersion from /v1beta1 to /v1\"\n\nmsgid \"0.1.10 Use HostToContainer mount propagation\"\nmsgstr \"0.1.10 Use HostToContainer mount propagation\"\n\nmsgid \"0.1.11 Added the helm.sh/hook, helm.sh/hook-weight annotations\"\nmsgstr \"0.1.11 Added the helm.sh/hook, helm.sh/hook-weight annotations\"\n\nmsgid \"0.1.11 Remove congress residue\"\nmsgstr \"0.1.11 Remove congress residue\"\n\nmsgid \"\"\n\"0.1.11 Secure libvirt connection from using 127.0.0.1 to use unix socket\"\nmsgstr \"\"\n\"0.1.11 Secure libvirt connection from using 127.0.0.1 to use Unix socket\"\n\nmsgid \"0.1.11 Update RBAC apiVersion from /v1beta1 to /v1\"\nmsgstr \"0.1.11 Update RBAC apiVersion from /v1beta1 to /v1\"\n\nmsgid \"0.1.12 Add helm 
hook conditional\"\nmsgstr \"0.1.12 Add helm hook conditional\"\n\nmsgid \"0.1.12 Removed \\\"name\\\" parameter from Rally tests\"\nmsgstr \"0.1.12 Removed \\\"name\\\" parameter from Rally tests\"\n\nmsgid \"0.1.12 Update RBAC apiVersion from /v1beta1 to /v1\"\nmsgstr \"0.1.12 Update RBAC apiVersion from /v1beta1 to /v1\"\n\nmsgid \"0.1.12 Update volume type creation bootstrap logic\"\nmsgstr \"0.1.12 Update volume type creation bootstrap logic\"\n\nmsgid \"0.1.13 Add NFS cinder backup override\"\nmsgstr \"0.1.13 Add NFS cinder backup override\"\n\nmsgid \"0.1.13 Change Issuer to ClusterIssuer\"\nmsgstr \"0.1.13 Change Issuer to ClusterIssuer\"\n\nmsgid \"0.1.13 Fix Error - wrong number of args for set\"\nmsgstr \"0.1.13 Fix Error - wrong number of args for set\"\n\nmsgid \"0.1.14 Add Multipathd support for ISCSI backed volumes\"\nmsgstr \"0.1.14 Add Multipathd support for ISCSI backed volumes\"\n\nmsgid \"0.1.14 BUG for deploying multiple compute nodes\"\nmsgstr \"0.1.14 BUG for deploying multiple compute nodes\"\n\nmsgid \"0.1.14 Remove setup helm hooks\"\nmsgstr \"0.1.14 Remove setup Helm hooks\"\n\nmsgid \"0.1.15 Fix the problem in hostNetwork mode\"\nmsgstr \"0.1.15 Fix the problem in hostNetwork mode\"\n\nmsgid \"0.1.15 Mount /dev/pts in Nova compute container\"\nmsgstr \"0.1.15 Mount /dev/pts in Nova compute container\"\n\nmsgid \"0.1.16 Use first IP address for migration\"\nmsgstr \"0.1.16 Use first IP address for migration\"\n\nmsgid \"0.1.17 Add multipathd support for ISCSI backed volume VMs\"\nmsgstr \"0.1.17 Add multipathd support for ISCSI backed volume VMs\"\n\nmsgid \"0.1.18 Fix the nova-compute-ironic label issue\"\nmsgstr \"0.1.18 Fix the nova-compute-ironic label issue\"\n\nmsgid \"0.1.19 Host resource scale adjustment about ironic\"\nmsgstr \"0.1.19 Host resource scale adjustment about Ironic\"\n\nmsgid \"0.1.2 Add ssh to Nova compute\"\nmsgstr \"0.1.2 Add ssh to Nova compute\"\n\nmsgid \"0.1.2 Added post-install and post-upgrade helm hook for 
Jobs\"\nmsgstr \"0.1.2 Added post-install and post-upgrade helm hook for Jobs\"\n\nmsgid \"0.1.2 Added post-install and post-upgrade helm hook for jobs\"\nmsgstr \"0.1.2 Added post-install and post-upgrade helm hook for jobs\"\n\nmsgid \"0.1.2 Added post-install and post-upgrade helm hooks on Jobs\"\nmsgstr \"0.1.2 Added post-install and post-upgrade helm hooks on Jobs\"\n\nmsgid \"0.1.2 Added post-install and post-upgrade helm.sh/hook for jobs\"\nmsgstr \"0.1.2 Added post-install and post-upgrade helm.sh/hook for jobs\"\n\nmsgid \"0.1.2 Added post-install and post-upgrade hook for Jobs\"\nmsgstr \"0.1.2 Added post-install and post-upgrade hook for Jobs\"\n\nmsgid \"0.1.2 Change issuer to clusterissuer\"\nmsgstr \"0.1.2 Change issuer to clusterissuer\"\n\nmsgid \"0.1.2 Establish Nova/Placement dependencies\"\nmsgstr \"0.1.2 Establish Nova/Placement dependencies\"\n\nmsgid \"0.1.2 Remove tls values override for clients_heat\"\nmsgstr \"0.1.2 Remove tls values override for clients_heat\"\n\nmsgid \"0.1.2 Support service tokens to prevent long-running job failures\"\nmsgstr \"0.1.2 Support service tokens to prevent long-running job failures\"\n\nmsgid \"0.1.2 To avoid wrong version check for mysqlclient\"\nmsgstr \"0.1.2 To avoid wrong version check for mysqlclient\"\n\nmsgid \"0.1.2 UPDATE\"\nmsgstr \"0.1.2 UPDATE\"\n\nmsgid \"0.1.2 fixes tls issue\"\nmsgstr \"0.1.2 fixes tls issue\"\n\nmsgid \"0.1.3 Change Issuer to ClusterIssuer\"\nmsgstr \"0.1.3 Change Issuer to ClusterIssuer\"\n\nmsgid \"0.1.3 Establish Nova and Placement dependencies\"\nmsgstr \"0.1.3 Establish Nova and Placement dependencies\"\n\nmsgid \"0.1.3 Modify Password validator related settings in Horizon\"\nmsgstr \"0.1.3 Modify Password validator related settings in Horizon\"\n\nmsgid \"0.1.3 Revert - Change issuer to clusterissuer\"\nmsgstr \"0.1.3 Revert - Change issuer to clusterissuer\"\n\nmsgid \"0.1.3 Support of external ceph backend\"\nmsgstr \"0.1.3 Support of external CEPH backend\"\n\nmsgid 
\"0.1.3 UPDATE\"\nmsgstr \"0.1.3 UPDATE\"\n\nmsgid \"0.1.3 Update neutron to use Nginx apparmor profile\"\nmsgstr \"0.1.3 Update neutron to use Nginx apparmor profile\"\n\nmsgid \"0.1.3 Use proper default placement image\"\nmsgstr \"0.1.3 Use proper default placement image\"\n\nmsgid \"0.1.4 Add null check condition in placement deployment manifest\"\nmsgstr \"0.1.4 Add null check condition in placement deployment manifest\"\n\nmsgid \"0.1.4 Change Issuer to ClusterIssuer\"\nmsgstr \"0.1.4 Change Issuer to ClusterIssuer\"\n\nmsgid \"0.1.4 Enable iscsi to work correctly in cinder volume\"\nmsgstr \"0.1.4 Enable iSCSI to work correctly in Cinder volume\"\n\nmsgid \"0.1.4 Pass ovs agent config to dhcp agent\"\nmsgstr \"0.1.4 Pass ovs agent config to DHCP agent\"\n\nmsgid \"0.1.4 Remove deprecated os_region_name for placement\"\nmsgstr \"0.1.4 Remove deprecated os_region_name for placement\"\n\nmsgid \"0.1.4 Revert - Change Issuer to ClusterIssuer\"\nmsgstr \"0.1.4 Revert - Change Issuer to ClusterIssuer\"\n\nmsgid \"0.1.4 UPDATE\"\nmsgstr \"0.1.4 UPDATE\"\n\nmsgid \"0.1.4 Update RBAC apiVersion from /v1beta1 to /v1\"\nmsgstr \"0.1.4 Update RBAC apiVersion from /v1beta1 to /v1\"\n\nmsgid \"0.1.5 Add missing flags to nginx container in neutron chart\"\nmsgstr \"0.1.5 Add missing flags to Nginx container in Neutron chart\"\n\nmsgid \"0.1.5 Change Issuer to ClusterIssuer\"\nmsgstr \"0.1.5 Change Issuer to ClusterIssuer\"\n\nmsgid \"0.1.5 Enable hostIPC\"\nmsgstr \"0.1.5 Enable hostIPC\"\n\nmsgid \"0.1.5 Resolves mount issue with termination-log\"\nmsgstr \"0.1.5 Resolves mount issue with termination-log\"\n\nmsgid \"0.1.5 Revert - Change Issuer to ClusterIssuer\"\nmsgstr \"0.1.5 Revert - Change Issuer to ClusterIssuer\"\n\nmsgid \"0.1.5 Revert clusterissuer change\"\nmsgstr \"0.1.5 Revert clusterissuer change\"\n\nmsgid \"0.1.6 Change Issuer to ClusterIssuer\"\nmsgstr \"0.1.6 Change Issuer to ClusterIssuer\"\n\nmsgid \"0.1.6 Enable volume backup for iSCSI based 
volumes\"\nmsgstr \"0.1.6 Enable volume backup for iSCSI based volumes\"\n\nmsgid \"0.1.6 Fix typo in subPath entry\"\nmsgstr \"0.1.6 Fix typo in subPath entry\"\n\nmsgid \"0.1.6 Revert - Change Issuer to ClusterIssuer\"\nmsgstr \"0.1.6 Revert - Change Issuer to ClusterIssuer\"\n\nmsgid \"0.1.6 Swap SSH key names to reflect the correct key\"\nmsgstr \"0.1.6 Swap SSH key names to reflect the correct key\"\n\nmsgid \"0.1.6 Update glance default policy values\"\nmsgstr \"0.1.6 Update glance default policy values\"\n\nmsgid \"0.1.6 Use HostToContainer mountPropagation\"\nmsgstr \"0.1.6 Use HostToContainer mountPropagation\"\n\nmsgid \"0.1.7 Change Issuer to ClusterIssuer\"\nmsgstr \"0.1.7 Change Issuer to ClusterIssuer\"\n\nmsgid \"0.1.7 Move rabbit-init to dynamic dependency\"\nmsgstr \"0.1.7 Move rabbit-init to dynamic dependency\"\n\nmsgid \"0.1.7 Update glance default policy values\"\nmsgstr \"0.1.7 Update glance default policy values\"\n\nmsgid \"0.1.7 Update storage init script with cacert\"\nmsgstr \"0.1.7 Update storage init script with cacert\"\n\nmsgid \"0.1.7 Use HostToContainer mountPropagation\"\nmsgstr \"0.1.7 Use HostToContainer mountPropagation\"\n\nmsgid \"0.1.8 Change Issuer to ClusterIssuer\"\nmsgstr \"0.1.8 Change Issuer to ClusterIssuer\"\n\nmsgid \"0.1.8 Implement \\\"CSRF_COOKIE_HTTPONLY\\\" option support in horizon\"\nmsgstr \"0.1.8 Implement \\\"CSRF_COOKIE_HTTPONLY\\\" option support in horizon\"\n\nmsgid \"0.1.8 Revert - Change Issuer to ClusterIssuer\"\nmsgstr \"0.1.8 Revert - Change Issuer to ClusterIssuer\"\n\nmsgid \"0.1.8 Revert Change Issuer to ClusterIssuer\"\nmsgstr \"0.1.8 Revert Change Issuer to ClusterIssuer\"\n\nmsgid \"0.1.8 Update glance default policy values\"\nmsgstr \"0.1.8 Update glance default policy values\"\n\nmsgid \"0.1.9 Add helm.sh/hook related annotations\"\nmsgstr \"0.1.9 Add helm.sh/hook related annotations\"\n\nmsgid \"0.1.9 Revert \\\"Change Issuer to ClusterIssuer\\\"\"\nmsgstr \"0.1.9 Revert \\\"Change Issuer 
to ClusterIssuer\\\"\"\n\nmsgid \"0.1.9 Update ovs agent to support host/label overrides\"\nmsgstr \"0.1.9 Update ovs agent to support host/label overrides\"\n\nmsgid \"0.1.9 Use HostToContainer mount propagation\"\nmsgstr \"0.1.9 Use HostToContainer mount propagation\"\n\nmsgid \"0.2.0 Remove support for releases before T\"\nmsgstr \"0.2.0 Remove support for releases before T\"\n\nmsgid \"0.2.1 Add Ussuri release support\"\nmsgstr \"0.2.1 Add Ussuri release support\"\n\nmsgid \"0.2.1 Adding rabbitmq TLS logic\"\nmsgstr \"0.2.1 Adding RabbitMQ TLS logic\"\n\nmsgid \"0.2.1 Fix dnsPolicy for housekeeping service\"\nmsgstr \"0.2.1 Fix dnsPolicy for housekeeping service\"\n\nmsgid \"0.2.1 Fix the ceph pool creations for openstack services\"\nmsgstr \"0.2.1 Fix the Ceph pool creations for OpenStack services\"\n\nmsgid \"0.2.1 Make python script PEP8 compliant\"\nmsgstr \"0.2.1 Make Python script PEP8 compliant\"\n\nmsgid \"0.2.1 Remove paste ini config settings\"\nmsgstr \"0.2.1 Remove paste ini config settings\"\n\nmsgid \"0.2.1 Remove unnecessary +x permission on gotpl files\"\nmsgstr \"0.2.1 Remove unnecessary +x permission on gotpl files\"\n\nmsgid \"0.2.1 Update htk requirements repo\"\nmsgstr \"0.2.1 Update htk requirements repo\"\n\nmsgid \"0.2.1 Use policies in yaml format\"\nmsgstr \"0.2.1 Use policies in yaml format\"\n\nmsgid \"0.2.10 Add tls cert mounting to nova-novnc\"\nmsgstr \"0.2.10 Add tls cert mounting to nova-novnc\"\n\nmsgid \"0.2.10 Added OCI registry authentication\"\nmsgstr \"0.2.10 Added OCI registry authentication\"\n\nmsgid \"0.2.10 Enable taint toleration for Openstack services\"\nmsgstr \"0.2.10 Enable taint toleration for OpenStack services\"\n\nmsgid \"0.2.10 Helm 3 - Fix Job Labels\"\nmsgstr \"0.2.10 Helm 3 - Fix Job Labels\"\n\nmsgid \"0.2.10 Make internal TLS more robust\"\nmsgstr \"0.2.10 Make internal TLS more robust\"\n\nmsgid \"0.2.10 Update htk requirements repo\"\nmsgstr \"0.2.10 Update htk requirements repo\"\n\nmsgid \"0.2.10 
Updated naming for subchart compatibility\"\nmsgstr \"0.2.10 Updated naming for subchart compatibility\"\n\nmsgid \"0.2.11 Add Victoria and Wallaby releases support\"\nmsgstr \"0.2.11 Add Victoria and Wallaby releases support\"\n\nmsgid \"0.2.11 Add missing slash\"\nmsgstr \"0.2.11 Add missing slash\"\n\nmsgid \"\"\n\"0.2.11 Distinguish between port number of internal endpoint and binding port \"\n\"number\"\nmsgstr \"\"\n\"0.2.11 Distinguish between the port number of internal endpoint and binding \"\n\"port number\"\n\nmsgid \"0.2.11 Fix job annotations for db init job\"\nmsgstr \"0.2.11 Fix job annotations for db init job\"\n\nmsgid \"0.2.11 Improve health probe logging\"\nmsgstr \"0.2.11 Improve health probe logging\"\n\nmsgid \"0.2.11 Remove old releases values override in heat\"\nmsgstr \"0.2.11 Remove old releases values override in heat\"\n\nmsgid \"0.2.11 Update htk requirements repo\"\nmsgstr \"0.2.11 Update htk requirements repo\"\n\nmsgid \"0.2.12 Bootstrap flavor creation efficiencies\"\nmsgstr \"0.2.12 Bootstrap flavour creation efficiencies\"\n\nmsgid \"0.2.12 Fix infinite recursion deadlock on netns cleanup cron\"\nmsgstr \"0.2.12 Fix infinite recursion deadlock on netns cleanup cron\"\n\nmsgid \"0.2.12 Helm 3 - Fix Job Labels\"\nmsgstr \"0.2.12 Helm 3 - Fix Job Labels\"\n\nmsgid \"\"\n\"0.2.12 Migrated CronJob resource to batch/v1 API version & \"\n\"PodDisruptionBudget to policy/v1\"\nmsgstr \"\"\n\"0.2.12 Migrated CronJob resource to batch/v1 API version & \"\n\"PodDisruptionBudget to policy/v1\"\n\nmsgid \"0.2.12 Remove cinder v1/v2 defaults\"\nmsgstr \"0.2.12 Remove Cinder v1/v2 defaults\"\n\nmsgid \"0.2.12 Remove older values overrides\"\nmsgstr \"0.2.12 Remove older values overrides\"\n\nmsgid \"0.2.12 Support both json and yaml RBAC Policy Format\"\nmsgstr \"0.2.12 Support both JSON and YAML RBAC Policy Format\"\n\nmsgid \"0.2.12 Use HTTP probe instead of TCP probe\"\nmsgstr \"0.2.12 Use HTTP probe instead of TCP probe\"\n\nmsgid \"0.2.13 
Add Xena and Yoga values overrides\"\nmsgstr \"0.2.13 Add Xena and Yoga values overrides\"\n\nmsgid \"0.2.13 Add container infra api version in values\"\nmsgstr \"0.2.13 Add container infra API version in values\"\n\nmsgid \"0.2.13 Add missing 'runlock' hostMount when enable_scsi\"\nmsgstr \"0.2.13 Add missing 'runlock' hostMount when enable_scsi\"\n\nmsgid \"0.2.13 Enable taint toleration for Openstack services\"\nmsgstr \"0.2.13 Enable taint toleration for OpenStack services\"\n\nmsgid \"0.2.13 Helm 3 - Fix more Job Labels\"\nmsgstr \"0.2.13 Helm 3 - Fix more Job Labels\"\n\nmsgid \"0.2.13 Migrated PodDisruptionBudget resource to policy/v1 API version\"\nmsgstr \"0.2.13 Migrated PodDisruptionBudget resource to policy/v1 API version\"\n\nmsgid \"0.2.13 Support TLS endpoints\"\nmsgstr \"0.2.13 Support TLS endpoints\"\n\nmsgid \"0.2.13 Upgrade default images to ussuri\"\nmsgstr \"0.2.13 Upgrade default images to Ussuri\"\n\nmsgid \"0.2.14 Add OPENSTACK_ENDPOINT_TYPE value\"\nmsgstr \"0.2.14 Add OPENSTACK_ENDPOINT_TYPE value\"\n\nmsgid \"0.2.14 Add Xena and Yoga values overrides\"\nmsgstr \"0.2.14 Add Xena and Yoga values overrides\"\n\nmsgid \"0.2.14 Added OCI registry authentication\"\nmsgstr \"0.2.14 Added OCI registry authentication\"\n\nmsgid \"0.2.14 Fix notifications\"\nmsgstr \"0.2.14 Fix notifications\"\n\nmsgid \"0.2.14 Migrate IP from bridge for auto_bridge_add\"\nmsgstr \"0.2.14 Migrate IP from bridge for auto_bridge_add\"\n\nmsgid \"0.2.14 Update htk requirements repo\"\nmsgstr \"0.2.14 Update htk requirements repo\"\n\nmsgid \"0.2.14 Use helm.sh/hook annotations for jobs\"\nmsgstr \"0.2.14 Use helm.sh/hook annotations for jobs\"\n\nmsgid \"0.2.15 Add local_settings.d\"\nmsgstr \"0.2.15 Add local_settings.d\"\n\nmsgid \"0.2.15 Added OCI registry authentication\"\nmsgstr \"0.2.15 Added OCI registry authentication\"\n\nmsgid \"\"\n\"0.2.15 Distinguish between port number of internal endpoint and binding port \"\n\"number\"\nmsgstr \"\"\n\"0.2.15 
Distinguish between the port number of the internal endpoint and the \"\n\"binding port number\"\n\nmsgid \"\"\n\"0.2.15 Fix archive-deleted-rows for enabling date command as value for \"\n\"before option\"\nmsgstr \"\"\n\"0.2.15 Fix archive-deleted-rows for enabling date command as value for \"\n\"before option\"\n\nmsgid \"0.2.15 Reduce log chattiness\"\nmsgstr \"0.2.15 Reduce log chattiness\"\n\nmsgid \"0.2.15 Remove glance registry\"\nmsgstr \"0.2.15 Remove glance registry\"\n\nmsgid \"0.2.15 Remove unsupported values overrides\"\nmsgstr \"0.2.15 Remove unsupported values overrides\"\n\nmsgid \"\"\n\"0.2.16 Distinguish between port number of internal endpoint and binding port \"\n\"number\"\nmsgstr \"\"\n\"0.2.16 Distinguish between the port number of the internal endpoint and the \"\n\"binding port number\"\n\nmsgid \"0.2.16 Enable taint toleration for Openstack services\"\nmsgstr \"0.2.16 Enable taint toleration for OpenStack services\"\n\nmsgid \"0.2.16 Fix container-infra value\"\nmsgstr \"0.2.16 Fix container-infra value\"\n\nmsgid \"0.2.16 Remove extra fsGroup\"\nmsgstr \"0.2.16 Remove extra fsGroup\"\n\nmsgid \"0.2.16 Remove the policy document in values file\"\nmsgstr \"0.2.16 Remove the policy document in values file\"\n\nmsgid \"0.2.16 Remove usage of six\"\nmsgstr \"0.2.16 Remove usage of six\"\n\nmsgid \"0.2.16 Support TLS endpoints\"\nmsgstr \"0.2.16 Support TLS endpoints\"\n\nmsgid \"0.2.17 Add custom logo\"\nmsgstr \"0.2.17 Add custom logo\"\n\nmsgid \"0.2.17 Fix disablement of helm.sh/hook for Helm v2\"\nmsgstr \"0.2.17 Fix disablement of helm.sh/hook for Helm v2\"\n\nmsgid \"0.2.17 Migrated PodDisruptionBudget resource to policy/v1 API version\"\nmsgstr \"0.2.17 Migrated PodDisruptionBudget resource to policy/v1 API version\"\n\nmsgid \"0.2.17 Remove unsupported values overrides\"\nmsgstr \"0.2.17 Remove unsupported values overrides\"\n\nmsgid \"0.2.17 Update default image references\"\nmsgstr \"0.2.17 Update default image references\"\n\nmsgid 
\"0.2.17 Use HTTP probe instead of TCP probe\"\nmsgstr \"0.2.17 Use HTTP probe instead of TCP probe\"\n\nmsgid \"0.2.18 Add helm hook in bootstrap job\"\nmsgstr \"0.2.18 Add Helm hook in bootstrap job\"\n\nmsgid \"0.2.18 Change hook weight for bootstrap job\"\nmsgstr \"0.2.18 Change hook weight for bootstrap job\"\n\nmsgid \"0.2.18 Enable taint toleration for Openstack services\"\nmsgstr \"0.2.18 Enable taint toleration for OpenStack services\"\n\nmsgid \"0.2.18 Give service time to restore\"\nmsgstr \"0.2.18 Give service time to restore\"\n\nmsgid \"0.2.18 Remove default policy\"\nmsgstr \"0.2.18 Remove default policy\"\n\nmsgid \"0.2.18 Support TLS for ks jobs\"\nmsgstr \"0.2.18 Support TLS for ks jobs\"\n\nmsgid \"0.2.18 Updated naming for subchart compatibility\"\nmsgstr \"0.2.18 Updated naming for subchart compatibility\"\n\nmsgid \"0.2.19 Add volume types visibility (public/private)\"\nmsgstr \"0.2.19 Add volume types visibility (public/private)\"\n\nmsgid \"0.2.19 Added qdhcp NS host validation for deleting wrong namespaces.\"\nmsgstr \"0.2.19 Added qdhcp NS host validation for deleting wrong namespaces.\"\n\nmsgid \"0.2.19 Define service cleaner sleep time\"\nmsgstr \"0.2.19 Define service cleaner sleep time\"\n\nmsgid \"0.2.19 Remove unsupported value overrides\"\nmsgstr \"0.2.19 Remove unsupported value overrides\"\n\nmsgid \"0.2.19 Revert Reduce log chattiness\"\nmsgstr \"0.2.19 Revert Reduce log chattiness\"\n\nmsgid \"\"\n\"0.2.19 Support SSL offloading at reverse proxy for internal and admin \"\n\"endpoints\"\nmsgstr \"\"\n\"0.2.19 Support SSL offloading at reverse proxy for internal and admin \"\n\"endpoints\"\n\nmsgid \"0.2.2 Add Victoria and Wallaby releases support\"\nmsgstr \"0.2.2 Add Victoria and Wallaby release support\"\n\nmsgid \"0.2.2 Add helm hook conditional\"\nmsgstr \"0.2.2 Add helm hook conditional\"\n\nmsgid \"0.2.2 Add helm hook for ks job\"\nmsgstr \"0.2.2 Add Helm hook for ks job\"\n\nmsgid \"0.2.2 Add values for backoffLimit and 
restartPolicy\"\nmsgstr \"0.2.2 Add values for backoffLimit and restartPolicy\"\n\nmsgid \"0.2.2 Adding rabbitmq TLS logic\"\nmsgstr \"0.2.2 Adding RabbitMQ TLS logic\"\n\nmsgid \"0.2.2 Fix restarting of magnum-conductor pods\"\nmsgstr \"0.2.2 Fix restarting of magnum-conductor pods\"\n\nmsgid \"0.2.2 Make python script PEP8 compliant\"\nmsgstr \"0.2.2 Make Python script PEP8 compliant\"\n\nmsgid \"0.2.2 Update htk requirements repo\"\nmsgstr \"0.2.2 Update htk requirements repo\"\n\nmsgid \"0.2.2 Use policies in yaml format\"\nmsgstr \"0.2.2 Use policies in yaml format\"\n\nmsgid \"0.2.20 Add SHOW_OPENRC_FILE value\"\nmsgstr \"0.2.20 Add SHOW_OPENRC_FILE value\"\n\nmsgid \"0.2.20 Add Xena and Yoga values overrides\"\nmsgstr \"0.2.20 Add Xena and Yoga values overrides\"\n\nmsgid \"0.2.20 Allow cinder v1/v2 endpoint creation if needed\"\nmsgstr \"0.2.20 Allow Cinder v1/v2 endpoint creation if needed\"\n\nmsgid \"0.2.20 Enable taint toleration for Openstack services\"\nmsgstr \"0.2.20 Enable taint toleration for OpenStack services\"\n\nmsgid \"0.2.20 Update script to true of grep does get anything.\"\nmsgstr \"0.2.20 Update script to true of grep does get anything.\"\n\nmsgid \"0.2.21 Add helm hook annotations in db-sync and db-init jobs\"\nmsgstr \"0.2.21 Add Helm hook annotations in db-sync and db-init jobs\"\n\nmsgid \"0.2.21 Fix for qdhcp NS host validation for deleting wrong namespaces.\"\nmsgstr \"0.2.21 Fix for qdhcp NS host validation for deleting wrong namespaces.\"\n\nmsgid \"0.2.21 Helm 3 - Fix Job Labels\"\nmsgstr \"0.2.21 Helm 3 - Fix Job Labels\"\n\nmsgid \"\"\n\"0.2.21 Migrated CronJob resource to batch/v1 API version & \"\n\"PodDisruptionBudget to policy/v1\"\nmsgstr \"\"\n\"0.2.21 Migrated CronJob resource to batch/v1 API version & \"\n\"PodDisruptionBudget to policy/v1\"\n\nmsgid \"0.2.21 Updated naming for subchart compatibility\"\nmsgstr \"0.2.21 Updated naming for subchart compatibility\"\n\nmsgid \"0.2.22 Add Xena and Yoga values 
overrides\"\nmsgstr \"0.2.22 Add Xena and Yoga values overrides\"\n\nmsgid \"0.2.22 Fix /run/xtables.lock may be a directory\"\nmsgstr \"0.2.22 Fix /run/xtables.lock may be a directory\"\n\nmsgid \"0.2.22 Migrated PodDisruptionBudget resource to policy/v1 API version\"\nmsgstr \"0.2.22 Migrated PodDisruptionBudget resource to policy/v1 API version\"\n\nmsgid \"0.2.22 Remove older values overrides\"\nmsgstr \"0.2.22 Remove older values overrides\"\n\nmsgid \"0.2.22 Update htk requirements repo\"\nmsgstr \"0.2.22 Update htk requirements repo\"\n\nmsgid \"0.2.23 Add Xena and Yoga value overrides\"\nmsgstr \"0.2.23 Add Xena and Yoga value overrides\"\n\nmsgid \"\"\n\"0.2.23 Add neutron_netns_cleanup_cron release image override, so that the \"\n\"respective release image is used\"\nmsgstr \"\"\n\"0.2.23 Add neutron_netns_cleanup_cron release image override, so that the \"\n\"respective release image is used\"\n\nmsgid \"0.2.23 Add option to enable extra wait for cell-setup-init\"\nmsgstr \"0.2.23 Add option to enable extra wait for cell-setup-init\"\n\nmsgid \"0.2.23 Added OCI registry authentication\"\nmsgstr \"0.2.23 Added OCI registry authentication\"\n\nmsgid \"0.2.23 Remove usage of six\"\nmsgstr \"0.2.23 Remove usage of six\"\n\nmsgid \"0.2.24 Added OCI registry authentication\"\nmsgstr \"0.2.24 Added OCI registry authentication\"\n\nmsgid \"0.2.24 Fix conditional check for cinder.utils.has_ceph_backend template\"\nmsgstr \"\"\n\"0.2.24 Fix conditional check for cinder.utils.has_ceph_backend template\"\n\nmsgid \"0.2.24 Fix nova-bootstrap job labels\"\nmsgstr \"0.2.24 Fix nova-bootstrap job labels\"\n\nmsgid \"0.2.24 Remove blank lines in logo configmap\"\nmsgstr \"0.2.24 Remove blank lines in logo configmap\"\n\nmsgid \"0.2.24 Remove unused admin port in keystone\"\nmsgstr \"0.2.24 Remove unused admin port in Keystone\"\n\nmsgid \"0.2.25 Add check for compute nodes\"\nmsgstr \"0.2.25 Add check for compute nodes\"\n\nmsgid \"0.2.25 Added OCI registry 
authentication\"\nmsgstr \"0.2.25 Added OCI registry authentication\"\n\nmsgid \"\"\n\"0.2.25 Migrated CronJob resource to batch/v1 API version & \"\n\"PodDisruptionBudget to policy/v1\"\nmsgstr \"\"\n\"0.2.25 Migrated CronJob resource to batch/v1 API version & \"\n\"PodDisruptionBudget to policy/v1\"\n\nmsgid \"\"\n\"0.2.25 Remove volumes unrelated with ceph backend from conditional volume \"\n\"list in cinder-volume deployment\"\nmsgstr \"\"\n\"0.2.25 Remove volumes unrelated with Ceph backend from conditional volume \"\n\"list in cinder-volume deployment\"\n\nmsgid \"0.2.25 Support TLS endpoints\"\nmsgstr \"0.2.25 Support TLS endpoints\"\n\nmsgid \"0.2.26 Add Xena and Yoga values overrides\"\nmsgstr \"0.2.26 Add Xena and Yoga values overrides\"\n\nmsgid \"\"\n\"0.2.26 Distinguish between port number of internal endpoint and binding port \"\n\"number\"\nmsgstr \"\"\n\"0.2.26 Distinguish between port number of internal endpoint and binding port \"\n\"number\"\n\nmsgid \"\"\n\"0.2.26 Fix _ssh-init.sh.tpl to copy the ssh keys to the user on the security \"\n\"context\"\nmsgstr \"\"\n\"0.2.26 Fix _ssh-init.sh.tpl to copy the ssh keys to the user on the security \"\n\"context\"\n\nmsgid \"0.2.26 Support SSL identity endpoint\"\nmsgstr \"0.2.26 Support SSL identity endpoint\"\n\nmsgid \"0.2.26 Use HTTP probe instead of TCP probe\"\nmsgstr \"0.2.26 Use HTTP probe instead of TCP probe\"\n\nmsgid \"0.2.27 Add tls1.2 minimum version to tls overrides\"\nmsgstr \"0.2.27 Add tls1.2 minimum version to TLS overrides\"\n\nmsgid \"\"\n\"0.2.27 Distinguish between port number of internal endpoint and binding port \"\n\"number\"\nmsgstr \"\"\n\"0.2.27 Distinguish between port number of internal endpoint and binding port \"\n\"number\"\n\nmsgid \"0.2.27 Support TLS endpoints\"\nmsgstr \"0.2.27 Support TLS endpoints\"\n\nmsgid \"0.2.27 Use LOG.warning instead of deprecated LOG.warn\"\nmsgstr \"0.2.27 Use LOG.warning instead of deprecated LOG.warn\"\n\nmsgid \"0.2.28 Added OCI 
registry authentication\"\nmsgstr \"0.2.28 Added OCI registry authentication\"\n\nmsgid \"0.2.28 Move ssl_minimum_version to console section\"\nmsgstr \"0.2.28 Move ssl_minimum_version to console section\"\n\nmsgid \"0.2.28 Use HTTP probe instead of TCP probe\"\nmsgstr \"0.2.28 Use HTTP probe instead of TCP probe\"\n\nmsgid \"0.2.29 Add SYS_ADMIN capability in cinder-volume\"\nmsgstr \"0.2.29 Add SYS_ADMIN capability in cinder-volume\"\n\nmsgid \"0.2.29 Remove ssh-config\"\nmsgstr \"0.2.29 Remove ssh-config\"\n\nmsgid \"0.2.29 Support TLS endpoints\"\nmsgstr \"0.2.29 Support TLS endpoints\"\n\nmsgid \"0.2.3 Add conductor & health manager\"\nmsgstr \"0.2.3 Add conductor & health manager\"\n\nmsgid \"0.2.3 Add openstack_enable_password_retrieve variable in value\"\nmsgstr \"0.2.3 Add openstack_enable_password_retrieve variable in value\"\n\nmsgid \"0.2.3 Add support for master kek rotation\"\nmsgstr \"0.2.3 Add support for master kek rotation\"\n\nmsgid \"0.2.3 Added helm.sh/hook annotations for Jobs\"\nmsgstr \"0.2.3 Added helm.sh/hook annotations for Jobs\"\n\nmsgid \"0.2.3 Adding rabbitmq TLS logic\"\nmsgstr \"0.2.3 Adding RabbitMQ TLS logic\"\n\nmsgid \"0.2.3 Allow using log_config_append=null\"\nmsgstr \"0.2.3 Allow using log_config_append=null\"\n\nmsgid \"0.2.3 Enable taint toleration for Openstack services\"\nmsgstr \"0.2.3 Enable taint toleration for OpenStack services\"\n\nmsgid \"0.2.3 Fix extra volume mounts\"\nmsgstr \"0.2.3 Fix extra volume mounts\"\n\nmsgid \"0.2.3 Fix logging config\"\nmsgstr \"0.2.3 Fix logging config\"\n\nmsgid \"0.2.3 Mount rabbitmq TLS secret\"\nmsgstr \"0.2.3 Mount RabbitMQ TLS secret\"\n\nmsgid \"\"\n\"0.2.3 Replace deprecated configuration ``[vnc]/\"\n\"vncserver_proxyclient_address``\"\nmsgstr \"\"\n\"0.2.3 Replace deprecated configuration ``[vnc]/\"\n\"vncserver_proxyclient_address``\"\n\nmsgid \"0.2.3 Update default imaage values to Wallaby\"\nmsgstr \"0.2.3 Update default image values to Wallaby\"\n\nmsgid \"0.2.3 Update 
default image values to Wallaby\"\nmsgstr \"0.2.3 Update default image values to Wallaby\"\n\nmsgid \"0.2.3 Update htk requirements repo\"\nmsgstr \"0.2.3 Update htk requirements repo\"\n\nmsgid \"0.2.3 Use policies in yaml format\"\nmsgstr \"0.2.3 Use policies in yaml format\"\n\nmsgid \"\"\n\"0.2.30 Distinguish between port number of internal endpoint and binding port \"\n\"number\"\nmsgstr \"\"\n\"0.2.30 Distinguish between port number of internal endpoint and binding port \"\n\"number\"\n\nmsgid \"0.2.30 Improve health probe logging\"\nmsgstr \"0.2.30 Improve health probe logging\"\n\nmsgid \"\"\n\"0.2.30 Specify a existing configmap name for external ceph configuration\"\nmsgstr \"\"\n\"0.2.30 Specify an existing configmap name for external Ceph configuration\"\n\nmsgid \"\"\n\"0.2.31 Remove fixed node name from default values and add service cleaner \"\n\"cronjob\"\nmsgstr \"\"\n\"0.2.31 Remove fixed node name from default values and add service cleaner \"\n\"cronjob\"\n\nmsgid \"0.2.31 Update oslo messaging get_transport\"\nmsgstr \"0.2.31 Update Oslo messaging get_transport\"\n\nmsgid \"0.2.32 Host of ironic compute service equals pod name\"\nmsgstr \"0.2.32 Host of Ironic compute service equals pod name\"\n\nmsgid \"\"\n\"0.2.32 Revert \\\"Remove fixed node name from default values and add service \"\n\"cleaner cronjob\\\"\"\nmsgstr \"\"\n\"0.2.32 Revert \\\"Remove fixed node name from default values and add service \"\n\"cleaner cronjob\\\"\"\n\nmsgid \"0.2.33 Cleanup old releases\"\nmsgstr \"0.2.33 Cleanup old releases\"\n\nmsgid \"0.2.34 Remove consoleauth in nova\"\nmsgstr \"0.2.34 Remove consoleauth in Nova\"\n\nmsgid \"0.2.35 Enable taint toleration for Openstack services\"\nmsgstr \"0.2.35 Enable taint toleration for OpenStack services\"\n\nmsgid \"0.2.36 Support TLS endpoints\"\nmsgstr \"0.2.36 Support TLS endpoints\"\n\nmsgid \"0.2.37 Remove nova-placement\"\nmsgstr \"0.2.37 Remove nova-placement\"\n\nmsgid \"0.2.38 Update nova image 
defaults\"\nmsgstr \"0.2.38 Update Nova image defaults\"\n\nmsgid \"\"\n\"0.2.39 Migrated CronJob resource to batch/v1 API version & \"\n\"PodDisruptionBudget to policy/v1\"\nmsgstr \"\"\n\"0.2.39 Migrated CronJob resource to batch/v1 API version & \"\n\"PodDisruptionBudget to policy/v1\"\n\nmsgid \"0.2.4 Add Ussuri release support\"\nmsgstr \"0.2.4 Add Ussuri release support\"\n\nmsgid \"0.2.4 Fix OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT value\"\nmsgstr \"0.2.4 Fix OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT value\"\n\nmsgid \"0.2.4 Fix transport_url\"\nmsgstr \"0.2.4 Fix transport_url\"\n\nmsgid \"0.2.4 Helm 3 - Fix Job Labels\"\nmsgstr \"0.2.4 Helm 3 - Fix Job Labels\"\n\nmsgid \"\"\n\"0.2.4 Migrated CronJob resource to batch/v1 API version & \"\n\"PodDisruptionBudget to policy/v1\"\nmsgstr \"\"\n\"0.2.4 Migrated CronJob resource to batch/v1 API version & \"\n\"PodDisruptionBudget to policy/v1\"\n\nmsgid \"0.2.4 Migrated PodDisruptionBudget resource to policy/v1 API version\"\nmsgstr \"0.2.4 Migrated PodDisruptionBudget resource to policy/v1 API version\"\n\nmsgid \"0.2.4 Mount empty temp_cache_dir for performance\"\nmsgstr \"0.2.4 Mount empty temp_cache_dir for performance\"\n\nmsgid \"0.2.4 Mount rabbitmq TLS secret\"\nmsgstr \"0.2.4 Mount RabbitMQ TLS secret\"\n\nmsgid \"0.2.4 Update default image release\"\nmsgstr \"0.2.4 Update default image release\"\n\nmsgid \"0.2.4 Update default image values to Wallaby\"\nmsgstr \"0.2.4 Update default image values to Wallaby\"\n\nmsgid \"0.2.4 Update defaults to W release\"\nmsgstr \"0.2.4 Update defaults to W release\"\n\nmsgid \"0.2.4 Use policies in yaml format\"\nmsgstr \"0.2.4 Use policies in yaml format\"\n\nmsgid \"0.2.40 Updated naming for subchart compatibility\"\nmsgstr \"0.2.40 Updated naming for subchart compatibility\"\n\nmsgid \"0.2.41 Add Xena and Yoga values overrides\"\nmsgstr \"0.2.41 Add Xena and Yoga values overrides\"\n\nmsgid \"0.2.42 Add missing configuration ``[vnc]/novncproxy_host``\"\nmsgstr \"0.2.42 
Add missing configuration ``[vnc]/novncproxy_host``\"\n\nmsgid \"0.2.43 Added OCI registry authentication\"\nmsgstr \"0.2.43 Added OCI registry authentication\"\n\nmsgid \"\"\n\"0.2.44 Distinguish between port number of internal endpoint and binding port \"\n\"number\"\nmsgstr \"\"\n\"0.2.44 Distinguish between port number of internal endpoint and binding port \"\n\"number\"\n\nmsgid \"0.2.45 Support TLS endpoints for metadata-api\"\nmsgstr \"0.2.45 Support TLS endpoints for metadata-api\"\n\nmsgid \"0.2.46 Use HTTP probe instead of TCP probe\"\nmsgstr \"0.2.46 Use HTTP probe instead of TCP probe\"\n\nmsgid \"0.2.47 Remove list agents rally test\"\nmsgstr \"0.2.47 Remove list agents rally test\"\n\nmsgid \"0.2.5 Add Ussuri release support\"\nmsgstr \"0.2.5 Add Ussuri release support\"\n\nmsgid \"0.2.5 Add Victoria and Wallaby releases support\"\nmsgstr \"0.2.5 Add Victoria and Wallaby release support\"\n\nmsgid \"0.2.5 Add helm hook for jobs\"\nmsgstr \"0.2.5 Add Helm hook for jobs\"\n\nmsgid \"0.2.5 Add volume QoS support\"\nmsgstr \"0.2.5 Add volume QoS support\"\n\nmsgid \"0.2.5 Added OCI registry authentication\"\nmsgstr \"0.2.5 Added OCI registry authentication\"\n\nmsgid \"\"\n\"0.2.5 Migrated CronJob resource to batch/v1 API version & \"\n\"PodDisruptionBudget to policy/v1\"\nmsgstr \"\"\n\"0.2.5 Migrated CronJob resource to batch/v1 API version & \"\n\"PodDisruptionBudget to policy/v1\"\n\nmsgid \"0.2.5 Migrated PodDisruptionBudget resource to policy/v1 API version\"\nmsgstr \"0.2.5 Migrated PodDisruptionBudget resource to policy/v1 API version\"\n\nmsgid \"0.2.5 Mount rabbitmq TLS secret\"\nmsgstr \"0.2.5 Mount RabbitMQ TLS secret\"\n\nmsgid \"0.2.5 Set reasonable default probe timeouts\"\nmsgstr \"0.2.5 Set reasonable default probe timeouts\"\n\nmsgid \"0.2.5 Update default image values to wallaby\"\nmsgstr \"0.2.5 Update default image values to Wallaby\"\n\nmsgid \"0.2.5 Update htk requirements repo\"\nmsgstr \"0.2.5 Update htk requirements 
repo\"\n\nmsgid \"0.2.5 Use rootwrap daemon\"\nmsgstr \"0.2.5 Use rootwrap daemon\"\n\nmsgid \"0.2.6 Add Victoria and Wallaby releases support\"\nmsgstr \"0.2.6 Add Victoria and Wallaby release support\"\n\nmsgid \"0.2.6 Add helm.sh/hook annotations for Jobs\"\nmsgstr \"0.2.6 Add helm.sh/hook annotations for Jobs\"\n\nmsgid \"0.2.6 Added OCI registry authentication\"\nmsgstr \"0.2.6 Added OCI registry authentication\"\n\nmsgid \"\"\n\"0.2.6 Added cronJob with script for archive deleted rows which cleanup \"\n\"databases\"\nmsgstr \"\"\n\"0.2.6 Added cronJob with script for archive deleted rows which cleanup \"\n\"databases\"\n\nmsgid \"0.2.6 Added helm.sh/hook with value of post-install and post-upgrade\"\nmsgstr \"0.2.6 Added helm.sh/hook with value of post-install and post-upgrade\"\n\nmsgid \"0.2.6 Added post-install and post-upgrade helm-hook for jobs\"\nmsgstr \"0.2.6 Added post-install and post-upgrade helm-hook for jobs\"\n\nmsgid \"0.2.6 Allow Barbican to talk to Mariadb over TLS\"\nmsgstr \"0.2.6 Allow Barbican to talk to Mariadb over TLS\"\n\nmsgid \"0.2.6 Enable taint toleration for Openstack services\"\nmsgstr \"0.2.6 Enable taint toleration for OpenStack services\"\n\nmsgid \"0.2.6 Fix neutron agent-init script\"\nmsgstr \"0.2.6 Fix Neutron agent-init script\"\n\nmsgid \"0.2.6 Migrated PodDisruptionBudget resource to policy/v1 API version\"\nmsgstr \"0.2.6 Migrated PodDisruptionBudget resource to policy/v1 API version\"\n\nmsgid \"0.2.6 Modify default probe timings\"\nmsgstr \"0.2.6 Modify default probe timings\"\n\nmsgid \"0.2.6 Remove default policy rules\"\nmsgstr \"0.2.6 Remove default policy rules\"\n\nmsgid \"0.2.6 Support SSL openstack endpoints\"\nmsgstr \"0.2.6 Support SSL OpenStack endpoints\"\n\nmsgid \"0.2.6 Use HTTP probe instead of TCP probe\"\nmsgstr \"0.2.6 Use HTTP probe instead of TCP probe\"\n\nmsgid \"0.2.7 Add Ussuri release support\"\nmsgstr \"0.2.7 Add Ussuri release support\"\n\nmsgid \"0.2.7 Add Victoria and Wallaby releases 
support\"\nmsgstr \"0.2.7 Add Victoria and Wallaby release support\"\n\nmsgid \"0.2.7 Add configuration for heat-tempest-plugin\"\nmsgstr \"0.2.7 Add configuration for heat-tempest-plugin\"\n\nmsgid \"0.2.7 Add helm hook annotations for db-sync job\"\nmsgstr \"0.2.7 Add Helm hook annotations for db-sync job\"\n\nmsgid \"0.2.7 Added OCI registry authentication\"\nmsgstr \"0.2.7 Added OCI registry authentication\"\n\nmsgid \"0.2.7 Added helm.sh/hook for the jobs\"\nmsgstr \"0.2.7 Added helm.sh/hook for the jobs\"\n\nmsgid \"0.2.7 Fix OPENSTACK_ENABLE_PASSWORD_RETRIEVE value\"\nmsgstr \"0.2.7 Fix OPENSTACK_ENABLE_PASSWORD_RETRIEVE value\"\n\nmsgid \"0.2.7 Fix db connection key name\"\nmsgstr \"0.2.7 Fix db connection key name\"\n\nmsgid \"0.2.7 Helm 3 - Fix Job Labels\"\nmsgstr \"0.2.7 Helm 3 - Fix Job Labels\"\n\nmsgid \"0.2.7 Made dnsmasq.conf overridable in configmap-bin\"\nmsgstr \"0.2.7 Made dnsmasq.conf overridable in configmap-bin\"\n\nmsgid \"0.2.7 Remove default policy rules\"\nmsgstr \"0.2.7 Remove default policy rules\"\n\nmsgid \"0.2.7 Support TLS for identity endpoint\"\nmsgstr \"0.2.7 Support TLS for identity endpoint\"\n\nmsgid \"0.2.7 Use HTTP probe instead of TCP probe\"\nmsgstr \"0.2.7 Use HTTP probe instead of TCP probe\"\n\nmsgid \"0.2.8 Add Victoria and Wallaby releases support\"\nmsgstr \"0.2.8 Add Victoria and Wallaby release support\"\n\nmsgid \"0.2.8 Add default polices\"\nmsgstr \"0.2.8 Add default policies\"\n\nmsgid \"0.2.8 Add helm3 hook supports to allow things like terraform deploys\"\nmsgstr \"0.2.8 Add helm3 hook supports to allow things like Terraform deploys\"\n\nmsgid \"0.2.8 Add logic to bootstrap to handle upgrade timing issue\"\nmsgstr \"0.2.8 Add logic to bootstrap to handle upgrade timing issue\"\n\nmsgid \"\"\n\"0.2.8 Fix the cron archive_deleted_rows bash script for before and max-rows \"\n\"values\"\nmsgstr \"\"\n\"0.2.8 Fix the cron archive_deleted_rows bash script for before and max-rows \"\n\"values\"\n\nmsgid \"0.2.8 
Helm 3 - Fix Job Labels\"\nmsgstr \"0.2.8 Helm 3 - Fix Job Labels\"\n\nmsgid \"0.2.8 Migrated PodDisruptionBudget resource to policy/v1 API version\"\nmsgstr \"0.2.8 Migrated PodDisruptionBudget resource to policy/v1 API version\"\n\nmsgid \"0.2.8 Remove default policy rules\"\nmsgstr \"0.2.8 Remove default policy rules\"\n\nmsgid \"0.2.8 Remove member bootstrap logic\"\nmsgstr \"0.2.8 Remove member bootstrap logic\"\n\nmsgid \"0.2.8 Update htk requirements repo\"\nmsgstr \"0.2.8 Update htk requirements repo\"\n\nmsgid \"0.2.9 Add Victoria and Wallaby releases support\"\nmsgstr \"0.2.9 Add Victoria and Wallaby releases support\"\n\nmsgid \"0.2.9 Add Xena and Yoga values overrides\"\nmsgstr \"0.2.9 Add Xena and Yoga values overrides\"\n\nmsgid \"0.2.9 Add image clean up to rally test\"\nmsgstr \"0.2.9 Add image clean-up to rally test\"\n\nmsgid \"0.2.9 Add option to disable helm.sh/hook annotations\"\nmsgstr \"0.2.9 Add option to disable helm.sh/hook annotations\"\n\nmsgid \"0.2.9 Enable taint toleration for Openstack services\"\nmsgstr \"0.2.9 Enable taint toleration for OpenStack services\"\n\nmsgid \"0.2.9 Helm 3 - Fix More Job Labels\"\nmsgstr \"0.2.9 Helm 3 - Fix More Job Labels\"\n\nmsgid \"0.2.9 Mount rabbitmq TLS secret for audit usage cronjob\"\nmsgstr \"0.2.9 Mount RabbitMQ TLS secret for audit usage cronjob\"\n\nmsgid \"\"\n\"0.2.9 Removed default policy in chart in favor of default policy in code\"\nmsgstr \"\"\n\"0.2.9 Removed default policy in chart in favour of default policy in code\"\n\nmsgid \"0.2.9 Removed default policy in favor in code policy\"\nmsgstr \"0.2.9 Removed default policy in favour in code policy\"\n\nmsgid \"0.3.0 Remove glance registry\"\nmsgstr \"0.3.0 Remove Glance registry\"\n\nmsgid \"0.3.0 Remove placement-migrate\"\nmsgstr \"0.3.0 Remove placement-migrate\"\n\nmsgid \"0.3.0 Remove support for Train and Ussuri\"\nmsgstr \"0.3.0 Remove support for Train and Ussuri\"\n\nmsgid \"0.3.1 Added backoffLimit for bootstrap job\"\nmsgstr 
\"0.3.1 Added backoffLimit for bootstrap job\"\n\nmsgid \"0.3.1 Change ceph-config-helper image tag\"\nmsgstr \"0.3.1 Change ceph-config-helper image tag\"\n\nmsgid \"0.3.1 Enable taint toleration for Openstack services\"\nmsgstr \"0.3.1 Enable taint toleration for OpenStack services\"\n\nmsgid \"0.3.1 Fix container infra api version in values\"\nmsgstr \"0.3.1 Fix container infra api version in values\"\n\nmsgid \"0.3.1 Remove default policy rules\"\nmsgstr \"0.3.1 Remove default policy rules\"\n\nmsgid \"0.3.1 Remove support for Train and Ussuri\"\nmsgstr \"0.3.1 Remove support for Train and Ussuri\"\n\nmsgid \"\"\n\"0.3.10 Distinguish between port number of internal endpoint and binding port \"\n\"number\"\nmsgstr \"\"\n\"0.3.10 Distinguish between port number of internal endpoint and binding port \"\n\"number\"\n\nmsgid \"0.3.11 Use HTTP probe instead of TCP probe\"\nmsgstr \"0.3.11 Use HTTP probe instead of TCP probe\"\n\nmsgid \"0.3.12 Add support for using Cinder as backend\"\nmsgstr \"0.3.12 Add support for using Cinder as a backend\"\n\nmsgid \"0.3.2 Decrease terminationGracePeriodSeconds on glance-api\"\nmsgstr \"0.3.2 Decrease terminationGracePeriodSeconds on Glance-api\"\n\nmsgid \"0.3.2 Remove default policy rules\"\nmsgstr \"0.3.2 Remove default policy rules\"\n\nmsgid \"0.3.2 Update mysql client version to 1.4.0\"\nmsgstr \"0.3.2 Update MySQL client version to 1.4.0\"\n\nmsgid \"\"\n\"0.3.2 Use correct labels for ovs which uses one daemonset for ovs-db and ovs-\"\n\"vswitchd\"\nmsgstr \"\"\n\"0.3.2 Use correct labels for ovs which uses one daemonset for ovs-db and ovs-\"\n\"vswitchd\"\n\nmsgid \"0.3.3 Fix for creation endpoins and services when v1/v2 are disabled\"\nmsgstr \"0.3.3 Fix for creation endpoins and services when v1/v2 are disabled\"\n\nmsgid \"0.3.3 Update naming for subchart compatibility\"\nmsgstr \"0.3.3 Update naming for subchart compatibility\"\n\nmsgid \"0.3.4 Change image default version to wallaby\"\nmsgstr \"0.3.4 Change image 
default version to wallaby\"\n\nmsgid \"0.3.5 Migrated PodDisruptionBudget resource to policy/v1 API version\"\nmsgstr \"0.3.5 Migrated PodDisruptionBudget resource to policy/v1 API version\"\n\nmsgid \"0.3.6 Add Xena and Yoga values overrides\"\nmsgstr \"0.3.6 Add Xena and Yoga values overrides\"\n\nmsgid \"\"\n\"0.3.7 Fix glance-etc template changing due to comment and whitespace between \"\n\"install and first upgrade\"\nmsgstr \"\"\n\"0.3.7 Fix glance-etc template changing due to comment and whitespace between \"\n\"install and first upgrade\"\n\nmsgid \"0.3.8 Added OCI registry authentication\"\nmsgstr \"0.3.8 Added OCI registry authentication\"\n\nmsgid \"0.3.9 Support TLS endpoints\"\nmsgstr \"0.3.9 Support TLS endpoints\"\n\nmsgid \"0.4.0 Remove support for Train and Ussuri\"\nmsgstr \"0.4.0 Remove support for Train and Ussuri\"\n\nmsgid \"0.4.1 Remove default policy rules\"\nmsgstr \"0.4.1 Remove default policy rules\"\n\nmsgid \"Current Series Release Notes\"\nmsgstr \"Current Series Release Notes\"\n\nmsgid \"OpenStack-Helm Release Notes\"\nmsgstr \"OpenStack-Helm Release Notes\"\n\nmsgid \"aodh Chart\"\nmsgstr \"aodh Chart\"\n\nmsgid \"barbican Chart\"\nmsgstr \"barbican Chart\"\n\nmsgid \"ceilometer Chart\"\nmsgstr \"ceilometer Chart\"\n\nmsgid \"cinder Chart\"\nmsgstr \"cinder Chart\"\n\nmsgid \"designate Chart\"\nmsgstr \"designate Chart\"\n\nmsgid \"glance Chart\"\nmsgstr \"glance Chart\"\n\nmsgid \"heat Chart\"\nmsgstr \"heat Chart\"\n\nmsgid \"horizon Chart\"\nmsgstr \"horizon Chart\"\n\nmsgid \"ironic Chart\"\nmsgstr \"ironic Chart\"\n\nmsgid \"keystone Chart\"\nmsgstr \"keystone Chart\"\n\nmsgid \"magnum Chart\"\nmsgstr \"magnum Chart\"\n\nmsgid \"mistral Chart\"\nmsgstr \"mistral Chart\"\n\nmsgid \"neutron Chart\"\nmsgstr \"neutron Chart\"\n\nmsgid \"nova Chart\"\nmsgstr \"nova Chart\"\n\nmsgid \"octavia Chart\"\nmsgstr \"octavia Chart\"\n\nmsgid \"placement Chart\"\nmsgstr \"placement Chart\"\n\nmsgid \"rally Chart\"\nmsgstr \"rally 
Chart\"\n\nmsgid \"senlin Chart\"\nmsgstr \"senlin Chart\"\n\nmsgid \"tempest Chart\"\nmsgstr \"tempest Chart\"\n"
  },
  {
    "path": "roles/build-helm-packages/defaults/main.yml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nversion:\n  helm: v3.12.2\nurl:\n  helm_repo: https://get.helm.sh\n...\n"
  },
  {
    "path": "roles/build-helm-packages/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- include: setup-helm-serve.yaml\n\n- name: build all charts in repo\n  make:\n    chdir: \"{{ work_dir }}\"\n    target: all\n...\n"
  },
  {
    "path": "roles/build-helm-packages/tasks/setup-helm-serve.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- block:\n    - name: check if correct version of helm client already installed\n      shell: \"set -e; [ \\\"x$($(type -p helm) version --client --short | awk '{ print $NF }' | awk -F '+' '{ print $1 }')\\\" == \\\"x${HELM_VERSION}\\\" ] || exit 1\"\n      environment:\n        HELM_VERSION: \"{{ version.helm }}\"\n      args:\n        executable: /bin/bash\n      register: need_helm\n      ignore_errors: True\n    - name: install helm client\n      when: need_helm is failed\n      become_user: root\n      shell: |\n              TMP_DIR=$(mktemp -d)\n              curl -sSL ${HELM_REPO_URL}/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -zxv --strip-components=1 -C ${TMP_DIR}\n              sudo mv ${TMP_DIR}/helm /usr/bin/helm\n              rm -rf ${TMP_DIR}\n      environment:\n        HELM_VERSION: \"{{ version.helm }}\"\n        HELM_REPO_URL: \"{{ url.helm_repo }}\"\n      args:\n        executable: /bin/bash\n    - name: setting up helm client\n      command: helm init --client-only --skip-refresh --stable-repo-url \"https://charts.helm.sh/stable\"\n\n- block:\n    - name: checking if local helm server is running\n      shell: curl -s 127.0.0.1:8879 | grep -q 'Helm Repository'\n      args:\n        executable: /bin/bash\n      register: helm_server_running\n      ignore_errors: True\n    - name: getting current host user name\n      when: helm_server_running is failed\n      shell: id 
-un\n      args:\n        executable: /bin/bash\n      register: helm_server_user\n    - name: moving systemd unit into place for helm server\n      when: helm_server_running is failed\n      become: yes\n      become_user: root\n      template:\n        src: helm-serve.service.j2\n        dest: /etc/systemd/system/helm-serve.service\n        mode: 416\n    - name: starting helm serve service\n      when: helm_server_running is failed\n      become: yes\n      become_user: root\n      systemd:\n        state: restarted\n        daemon_reload: yes\n        name: helm-serve\n        enabled: yes\n    - name: wait for helm server to be ready\n      shell: curl -s 127.0.0.1:8879 | grep -q 'Helm Repository'\n      args:\n        executable: /bin/bash\n      register: wait_for_helm_server\n      until: wait_for_helm_server.rc == 0\n      retries: 120\n      delay: 5\n\n- block:\n    - name: checking if helm 'stable' repo is present\n      shell: helm repo list | grep -q \"^stable\"\n      args:\n        executable: /bin/bash\n      register: helm_stable_repo_present\n      ignore_errors: True\n    - name: remove helm 'stable' repo when exists\n      when: helm_stable_repo_present is succeeded\n      command: helm repo remove stable\n\n- name: adding helm local repo\n  command: helm repo add local http://localhost:8879/charts\n...\n"
  },
  {
    "path": "roles/build-helm-packages/templates/helm-serve.service.j2",
    "content": "[Unit]\nDescription=Helm Server\nAfter=network.target\n\n[Service]\nUser={{ helm_server_user.stdout }}\nRestart=always\nExecStart=/usr/bin/helm serve\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "roles/chart-testing/README.rst",
    "content": "Run chart-testing (for helm charts)\n\n**Role Variables**\n\n.. zuul:rolevar:: zuul_work_dir\n   :default: {{ zuul.project.src_dir }}\n\n   The location of the main working directory of the job.\n\n.. zuul:rolevar:: chart_testing_options\n   :default: --validate-maintainers=false --check-version-increment=false\n\n   Arguments passed to chart testing.\n\n   The defaults are suitable for a Zuul environment because\n   `validate-maintainers` requires a valid git remote (which is not\n   present in Zuul) and `check-version-increment` requires each commit\n   to have a new version; Zuul users are expected to set the version\n   when tagging/publishing a release.\n"
  },
  {
    "path": "roles/chart-testing/defaults/main.yaml",
    "content": "zuul_work_dir: \"{{ zuul.project.src_dir }}\"\nchart_testing_options: --validate-maintainers=false --check-version-increment=false\nvirtualenv: \"{{ ansible_user_dir }}/venv\"\n"
  },
  {
    "path": "roles/chart-testing/tasks/main.yaml",
    "content": "- name: Run chart-testing\n  shell: |\n    source \"{{ virtualenv }}/bin/activate\"\n    ct lint {{ chart_testing_options }}\n  args:\n    chdir: \"{{ zuul_work_dir }}\"\n    executable: /bin/bash\n"
  },
  {
    "path": "roles/clean-host/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: remove osh directory\n  become: yes\n  become_user: root\n  file:\n    path: \"{{ item }}\"\n    state: absent\n  with_items:\n    - /var/lib/openstack-helm\n...\n"
  },
  {
    "path": "roles/deploy-apparmor/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- block:\n    - name: ensuring AppArmor is deployed on host\n      when: ansible_distribution == 'Ubuntu'\n      include_role:\n        name: deploy-package\n        tasks_from: dist\n      vars:\n        packages:\n          deb:\n            - apparmor\n\n    - name: \"Enable AppArmor\"\n      when: ansible_distribution == 'Ubuntu'\n      become: true\n      become_user: root\n      shell: |-\n              set -xe\n              systemctl enable apparmor\n              systemctl start apparmor\n              systemctl status apparmor.service\n      args:\n        executable: /bin/bash\n      ignore_errors: True\n...\n"
  },
  {
    "path": "roles/deploy-docker/defaults/main.yml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nproxy:\n  http: null\n  https: null\n  noproxy: null\n...\n"
  },
  {
    "path": "roles/deploy-docker/tasks/deploy-ansible-docker-support.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: ensuring SELinux is disabled on centos & fedora\n  when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' or ansible_distribution == 'Fedora'\n  become: true\n  become_user: root\n  command: setenforce 0\n  ignore_errors: True\n\n# NOTE(portdirect): See https://ask.openstack.org/en/question/110437/importerror-cannot-import-name-unrewindablebodyerror/\n- name: fix docker removal issue with ansible's docker_container on centos\n  when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux'\n  block:\n    - name: remove requests and urllib3 distro packages to fix docker removal issue with ansible's docker_container on centos\n      include_role:\n        name: deploy-package\n        tasks_from: dist\n      vars:\n        state: absent\n        packages:\n          rpm:\n            - python-urllib3\n            - python-requests\n    - name: restore requests and urllib3 distro packages to fix docker removal issue with ansible's docker_container on centos\n      include_role:\n        name: deploy-package\n        tasks_from: dist\n      vars:\n        state: present\n        packages:\n          rpm:\n            - python-urllib3\n            - python-requests\n\n- name: install additional packages\n  include_role:\n    name: deploy-package\n    tasks_from: dist\n  vars:\n    state: present\n    packages:\n      deb:\n  
      - conntrack\n        - bc\n        - nmap\n      rpm:\n        - conntrack-tools\n        - bc\n        - nmap\n\n- name: Ensure docker python packages deployed\n  include_role:\n    name: deploy-package\n    tasks_from: pip\n  vars:\n    packages:\n      - docker\n...\n"
  },
  {
    "path": "roles/deploy-docker/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: setting default limit memlock\n  shell: |\n    set -xe;\n    echo \"DefaultLimitMEMLOCK=16777216\" | sudo tee -a /etc/systemd/system.conf\n    sudo systemctl daemon-reexec\n    sudo systemctl daemon-reload\n\n- name: check if docker deploy is needed\n  raw: which docker\n  register: need_docker\n  ignore_errors: True\n\n- name: centos | moving systemd unit into place\n  when: ( ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' ) and ( need_docker is failed )\n  template:\n    src: centos-docker.service.j2\n    dest: /etc/systemd/system/docker.service\n    mode: 416\n\n- name: fedora | moving systemd unit into place\n  when: ( ansible_distribution == 'Fedora' ) and ( need_docker is failed )\n  template:\n    src: fedora-docker.service.j2\n    dest: /etc/systemd/system/docker.service\n    mode: 416\n\n- name: ubuntu | moving systemd unit into place\n  when: ( ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' ) and ( need_docker is failed )\n  template:\n    src: ubuntu-docker.service.j2\n    dest: /etc/systemd/system/docker.service\n    mode: 416\n\n# NOTE: (lamt) Setting up the proxy before installing docker\n- name: ensure docker.service.d directory exists\n  when: proxy.http\n  file:\n    path: /etc/systemd/system/docker.service.d\n    state: directory\n\n- name: proxy | moving proxy systemd unit into place\n  when: proxy.http\n  
template:\n    src: http-proxy.conf.j2\n    dest: /etc/systemd/system/docker.service.d/http-proxy.conf\n    mode: 416\n\n- name: deploy docker packages\n  when: need_docker is failed\n  include_role:\n    name: deploy-package\n    tasks_from: dist\n  vars:\n    packages:\n      deb:\n        - docker.io\n      rpm:\n        - docker\n\n- name: restarting docker\n  systemd:\n    state: restarted\n    daemon_reload: yes\n    name: docker\n\n- include: deploy-ansible-docker-support.yaml\n...\n"
  },
  {
    "path": "roles/deploy-docker/templates/centos-docker.service.j2",
    "content": "[Unit]\nDescription=Docker Application Container Engine\nDocumentation=http://docs.docker.com\nAfter=network.target\n\n[Service]\nType=notify\nNotifyAccess=all\nEnvironment=GOTRACEBACK=crash\nEnvironment=DOCKER_HTTP_HOST_COMPAT=1\nEnvironment=PATH=/usr/libexec/docker:/usr/bin:/usr/sbin\nExecStart=/usr/bin/dockerd-current \\\n          --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current \\\n          --default-runtime=docker-runc \\\n          --exec-opt native.cgroupdriver=systemd \\\n          --userland-proxy-path=/usr/libexec/docker/docker-proxy-current \\\n          --seccomp-profile=/etc/docker/seccomp.json \\\n          --graph=/var/lib/docker \\\n          --storage-driver=overlay2 \\\n          --log-driver=json-file \\\n          --iptables=false\n# NOTE(portdirect): fix mount propagation for CentOS, this is done post start,\n# as docker seems to reset this.\nExecStartPost=/usr/bin/mount --make-rshared /\nExecReload=/bin/kill -s HUP $MAINPID\nLimitNOFILE=1048576\nLimitNPROC=1048576\nLimitCORE=infinity\nTimeoutStartSec=0\nRestart=on-abnormal\nMountFlags=share\nKillMode=process\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "roles/deploy-docker/templates/fedora-docker.service.j2",
    "content": "[Unit]\nDescription=Docker Application Container Engine\nDocumentation=http://docs.docker.com\nAfter=network.target docker-containerd.service\nRequires=docker-containerd.service\n\n[Service]\nType=notify\nEnvironment=GOTRACEBACK=crash\nExecStart=/usr/bin/dockerd-current \\\n          --add-runtime oci=/usr/libexec/docker/docker-runc-current \\\n          --default-runtime=oci \\\n          --containerd /run/containerd.sock \\\n          --exec-opt native.cgroupdriver=systemd \\\n          --userland-proxy-path=/usr/libexec/docker/docker-proxy-current \\\n          --init-path=/usr/libexec/docker/docker-init-current \\\n          --seccomp-profile=/etc/docker/seccomp.json \\\n          --graph=/var/lib/docker \\\n          --storage-driver=overlay2 \\\n          --log-driver=json-file \\\n          --iptables=false\n# NOTE(portdirect): fix mount propagation for Fedora, this is done post start,\n# as docker seems to reset this.\nExecStartPost=/usr/bin/mount --make-rshared /\nExecReload=/bin/kill -s HUP $MAINPID\nTasksMax=8192\nLimitNOFILE=1048576\nLimitNPROC=1048576\nLimitCORE=infinity\nTimeoutStartSec=0\nRestart=on-abnormal\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "roles/deploy-docker/templates/http-proxy.conf.j2",
    "content": "[Service]\nEnvironment=\"HTTP_PROXY={{ proxy.http }}\"\nEnvironment=\"HTTPS_PROXY={{ proxy.https }}\"\nEnvironment=\"NO_PROXY={{ proxy.noproxy }}\"\n"
  },
  {
    "path": "roles/deploy-docker/templates/ubuntu-docker.service.j2",
    "content": "[Unit]\nDescription=Docker Application Container Engine\nDocumentation=https://docs.docker.com\nAfter=network.target docker.socket firewalld.service\nRequires=docker.socket\n\n[Service]\nType=notify\n# the default is not to use systemd for cgroups because the delegate issues still\n# exists and systemd currently does not support the cgroup feature set required\n# for containers run by docker\nEnvironmentFile=-/etc/default/docker\nExecStart=/usr/bin/dockerd --iptables=false -H fd:// $DOCKER_OPTS\nExecReload=/bin/kill -s HUP $MAINPID\nLimitNOFILE=1048576\n# Having non-zero Limit*s causes performance problems due to accounting overhead\n# in the kernel. We recommend using cgroups to do container-local accounting.\nLimitNPROC=infinity\nLimitCORE=infinity\n# Uncomment TasksMax if your systemd version supports it.\n# Only systemd 226 and above support this version.\nTasksMax=infinity\nTimeoutStartSec=0\n# set delegate yes so that systemd does not reset the cgroups of docker containers\nDelegate=yes\n# kill only the docker process, not all processes in the cgroup\nKillMode=process\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "roles/deploy-env/README.md",
    "content": "This role is used to deploy test environment which includes\n- install necessary prerequisites including Helm\n- deploy Containerd and a container runtime for Kubernetes\n- deploy Kubernetes using Kubeadm with a single control plane node\n- install Calico as a Kubernetes networking\n- establish tunnel between primary node and K8s control plane ndoe\n\nThe role works both for single-node and multi-node inventories. The role\ntotally relies on inventory groups. The `primary` and `k8s_control_plane`\ngroups must include only one node and this can be the same node for these two\ngroups.\n\nThe `primary` group is where we install `kubectl` and `helm` CLI tools.\nYou can consider this group as a deployer's machine.\n\nThe `k8s_control_plane` is where we deploy the K8s control plane.\n\nThe `k8s_cluster` group must include all the K8s nodes including control plane\nand worker nodes.\n\nIn case of running tests on a single-node environment the group `k8s_nodes`\nmust be empty. This means the K8s cluster will consist of a single control plane\nnode where all the workloads will be running.\n\nSee for example:\n\n```yaml\nall:\n  vars:\n    ansible_port: 22\n    ansible_user: ubuntu\n    ansible_ssh_private_key_file: /home/ubuntu/.ssh/id_rsa\n    ansible_ssh_extra_args: -o StrictHostKeyChecking=no\n  hosts:\n    primary:\n      ansible_host: 10.10.10.10\n    node-1:\n      ansible_host: 10.10.10.11\n    node-2:\n      ansible_host: 10.10.10.12\n    node-3:\n      ansible_host: 10.10.10.13\n  children:\n    primary:\n      hosts:\n        primary:\n    k8s_cluster:\n      hosts:\n        node-1:\n        node-2:\n        node-3:\n    k8s_control_plane:\n      hosts:\n        node-1:\n    k8s_nodes:\n      hosts:\n        node-2:\n        node-3:\n```\n"
  },
  {
    "path": "roles/deploy-env/defaults/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n---\nkube_version_repo: \"v1.35\"\n# the list of k8s package versions are available here\n# https://pkgs.k8s.io/core:/stable:/{{ kube_version_repo }}/deb/Packages\nkube_version: \"1.35.0-1.1\"\nhelm_version: \"4.1.1\"\ncrictl_version: \"v1.35.0\"\n\ncalico_setup: true\ncalico_version: \"v3.31.3\"\ncalico_manifest_url: \"https://raw.githubusercontent.com/projectcalico/calico/{{ calico_version }}/manifests/calico.yaml\"\n\ncilium_setup: false\ncilium_version: \"1.17.4\"\n\nflannel_setup: false\nflannel_version: v0.26.7\n\ngatewayapi_setup: true\ngatewayapi_implementation: \"envoy\"  # options: envoy\ngatewayapi_envoy_version: \"v1.7.0\"\n\ningress_setup: false\ningress_implementation: \"haproxy\"  # options: haproxy, nginx\ningress_nginx_version: \"4.12.2\"\ningress_haproxy_version: \"0.15.0\"\ningress_openstack_setup: true\ningress_ceph_setup: true\ningress_osh_infra_setup: false\n\nkubectl:\n  user: zuul\n  group: zuul\n\nosh_plugin_repo: \"https://opendev.org/openstack/openstack-helm-plugin.git\"\n\nkubeadm:\n  pod_network_cidr: \"10.244.0.0/16\"\n  service_cidr: \"10.96.0.0/16\"\ndocker:\n  root_path: /var/lib/docker\ndocker_users:\n  - zuul\ncontainerd:\n  root_path: /var/lib/containerd\nloopback_setup: false\nloopback_device: /dev/loop100\nloopback_image: /var/lib/openstack-helm/ceph-loop.img\nloopback_image_size: 12G\nloopback_format: false\nloopback_format_fs_type: ext4\nloopback_mount: 
false\nloopback_mount_path: /srv/node/loop100\n\ncoredns_resolver_setup: false\ncoredns_dns_server: \"8.8.8.8\"\n# This will be appended to the Corefile after the main configuration block\n# Example: |\n#   example.com:53 {\n#     forward . 1.2.3.4\n#   }\ncoredns_extra_config: \"\"\n\nmetallb_setup: true\nmetallb_version: \"0.15.3\"\nmetallb_pool_cidr: \"172.24.128.0/24\"\nmetallb_ingress_openstack_endpoint_cidr: \"172.24.128.100/24\"\nmetallb_ingress_osh_infra_endpoint_cidr: \"172.24.128.101/24\"\nmetallb_gatewayapi_endpoint_cidr: \"172.24.128.102/24\"\n\nclient_cluster_ssh_setup: true\nclient_ssh_user: zuul\ncluster_ssh_user: zuul\n\nfloating_network_setup: false\nfloating_network_cidr: \"172.24.4.0/24\"\nfloating_network_gateway_cidr: \"172.24.4.1/24\"\n\ntcpproxy_cidr: \"172.24.6.0/24\"\ntcpproxy_gatewayapi_cidr: \"172.24.6.1/24\"\ntcpproxy_ingress_openstack_cidr: \"172.24.6.2/24\"\n\ntunnel_network_cidr: \"172.24.5.0/24\"\ntunnel_client_cidr: \"172.24.5.2/24\"\ntunnel_cluster_cidr: \"172.24.5.1/24\"\n\ndnsmasq_image: \"quay.io/airshipit/neutron:2024.2-ubuntu_jammy\"\ndnsmasq_dns_server: \"8.8.8.8\"\ndnsmasq_extra_args: \"\"\n\nnginx_image: \"quay.io/airshipit/nginx:alpine3.18\"\n\noverlay_network_setup: true\noverlay_network_prefix: \"10.248.0.\"\noverlay_network_vxlan_iface: vxlan42\noverlay_network_vxlan_id: 42\n# NOTE: This is to avoid conflicts with the vxlan overlay managed by Openstack\n# which uses 4789 by default. Some alternative implementations used to\n# leverage 8472, so let's use it.\noverlay_network_vxlan_port: 8472\noverlay_network_bridge_name: brvxlan\noverlay_network_bridge_ip: \"{{ overlay_network_prefix }}{{ (groups['all'] | sort).index(inventory_hostname) + 1 }}\"\noverlay_network_underlay_dev: \"{{ hostvars[inventory_hostname]['ansible_default_ipv4']['interface'] }}\"\n...\n"
  },
  {
    "path": "roles/deploy-env/files/calico_patch.yaml",
    "content": "---\nspec:\n  template:\n    spec:\n      containers:\n        - name: calico-node\n          env:\n            # we need Calico to skip this interface while discovering the\n            # network changes on the host to prevent announcing unnecessary networks.\n            - name: IP_AUTODETECTION_METHOD\n              value: \"skip-interface=br-ex|tcpproxy.*|provider.*|client.*|o-hm.*|o-w.*\"\n...\n"
  },
  {
    "path": "roles/deploy-env/files/cluster_resolv.conf",
    "content": "nameserver 10.96.0.10\n"
  },
  {
    "path": "roles/deploy-env/files/containerd_config.toml",
    "content": "disabled_plugins = []\nimports = []\noom_score = 0\nplugin_dir = \"\"\nrequired_plugins = []\nroot = \"{{ containerd.root_path }}\"\nstate = \"/run/containerd\"\ntemp = \"\"\nversion = 2\n\n[cgroup]\n  path = \"\"\n\n[debug]\n  address = \"\"\n  format = \"\"\n  gid = 0\n  level = \"\"\n  uid = 0\n\n[grpc]\n  address = \"/run/containerd/containerd.sock\"\n  gid = 0\n  max_recv_message_size = 16777216\n  max_send_message_size = 16777216\n  tcp_address = \"\"\n  tcp_tls_ca = \"\"\n  tcp_tls_cert = \"\"\n  tcp_tls_key = \"\"\n  uid = 0\n\n[metrics]\n  address = \"\"\n  grpc_histogram = false\n\n[plugins]\n\n  [plugins.\"io.containerd.gc.v1.scheduler\"]\n    deletion_threshold = 0\n    mutation_threshold = 100\n    pause_threshold = 0.02\n    schedule_delay = \"0s\"\n    startup_delay = \"100ms\"\n\n  [plugins.\"io.containerd.grpc.v1.cri\"]\n    device_ownership_from_security_context = false\n    disable_apparmor = false\n    disable_cgroup = false\n    disable_hugetlb_controller = true\n    disable_proc_mount = false\n    disable_tcp_service = true\n    enable_selinux = false\n    enable_tls_streaming = false\n    enable_unprivileged_icmp = false\n    enable_unprivileged_ports = false\n    ignore_image_defined_volumes = false\n    max_concurrent_downloads = 3\n    max_container_log_line_size = 16384\n    netns_mounts_under_state_dir = false\n    restrict_oom_score_adj = false\n    sandbox_image = \"registry.k8s.io/pause:3.9\"\n    selinux_category_range = 1024\n    stats_collect_period = 10\n    stream_idle_timeout = \"4h0m0s\"\n    stream_server_address = \"127.0.0.1\"\n    stream_server_port = \"0\"\n    systemd_cgroup = false\n    tolerate_missing_hugetlb_controller = true\n    unset_seccomp_profile = \"\"\n\n    [plugins.\"io.containerd.grpc.v1.cri\".cni]\n      bin_dir = \"/opt/cni/bin\"\n      conf_dir = \"/etc/cni/net.d\"\n      conf_template = \"\"\n      ip_pref = \"\"\n      max_conf_num = 1\n\n    
[plugins.\"io.containerd.grpc.v1.cri\".containerd]\n      default_runtime_name = \"runc\"\n      disable_snapshot_annotations = true\n      discard_unpacked_layers = false\n      ignore_rdt_not_enabled_errors = false\n      no_pivot = false\n      snapshotter = \"overlayfs\"\n\n      [plugins.\"io.containerd.grpc.v1.cri\".containerd.default_runtime]\n        base_runtime_spec = \"\"\n        cni_conf_dir = \"\"\n        cni_max_conf_num = 0\n        container_annotations = []\n        pod_annotations = []\n        privileged_without_host_devices = false\n        runtime_engine = \"\"\n        runtime_path = \"\"\n        runtime_root = \"\"\n        runtime_type = \"\"\n\n        [plugins.\"io.containerd.grpc.v1.cri\".containerd.default_runtime.options]\n\n      [plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes]\n\n        [plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runc]\n          base_runtime_spec = \"\"\n          cni_conf_dir = \"\"\n          cni_max_conf_num = 0\n          container_annotations = []\n          pod_annotations = []\n          privileged_without_host_devices = false\n          runtime_engine = \"\"\n          runtime_path = \"\"\n          runtime_root = \"\"\n          runtime_type = \"io.containerd.runc.v2\"\n\n          [plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runc.options]\n            BinaryName = \"\"\n            CriuImagePath = \"\"\n            CriuPath = \"\"\n            CriuWorkPath = \"\"\n            IoGid = 0\n            IoUid = 0\n            NoNewKeyring = false\n            NoPivotRoot = false\n            Root = \"\"\n            ShimCgroup = \"\"\n            SystemdCgroup = true\n\n      [plugins.\"io.containerd.grpc.v1.cri\".containerd.untrusted_workload_runtime]\n        base_runtime_spec = \"\"\n        cni_conf_dir = \"\"\n        cni_max_conf_num = 0\n        container_annotations = []\n        pod_annotations = []\n        privileged_without_host_devices = false\n        
runtime_engine = \"\"\n        runtime_path = \"\"\n        runtime_root = \"\"\n        runtime_type = \"\"\n\n        [plugins.\"io.containerd.grpc.v1.cri\".containerd.untrusted_workload_runtime.options]\n\n    [plugins.\"io.containerd.grpc.v1.cri\".image_decryption]\n      key_model = \"node\"\n\n    [plugins.\"io.containerd.grpc.v1.cri\".registry]\n      config_path = \"/etc/containerd/certs.d\"\n\n      [plugins.\"io.containerd.grpc.v1.cri\".registry.auths]\n\n      [plugins.\"io.containerd.grpc.v1.cri\".registry.configs]\n{% for item in registry_namespaces %}\n{% if item.auth is defined %}\n        [plugins.\"io.containerd.grpc.v1.cri\".registry.configs.\"{{ item.namespace }}\".auth]\n          auth = \"{{ item.auth }}\"\n{% endif %}\n{% endfor %}\n\n      [plugins.\"io.containerd.grpc.v1.cri\".registry.headers]\n\n      [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors]\n\n    [plugins.\"io.containerd.grpc.v1.cri\".x509_key_pair_streaming]\n      tls_cert_file = \"\"\n      tls_key_file = \"\"\n\n  [plugins.\"io.containerd.internal.v1.opt\"]\n    path = \"/opt/containerd\"\n\n  [plugins.\"io.containerd.internal.v1.restart\"]\n    interval = \"10s\"\n\n  [plugins.\"io.containerd.internal.v1.tracing\"]\n    sampling_ratio = 1.0\n    service_name = \"containerd\"\n\n  [plugins.\"io.containerd.metadata.v1.bolt\"]\n    content_sharing_policy = \"shared\"\n\n  [plugins.\"io.containerd.monitor.v1.cgroups\"]\n    no_prometheus = false\n\n  [plugins.\"io.containerd.runtime.v1.linux\"]\n    no_shim = false\n    runtime = \"runc\"\n    runtime_root = \"\"\n    shim = \"containerd-shim\"\n    shim_debug = false\n\n  [plugins.\"io.containerd.runtime.v2.task\"]\n    platforms = [\"linux/amd64\"]\n    sched_core = false\n\n  [plugins.\"io.containerd.service.v1.diff-service\"]\n    default = [\"walking\"]\n\n  [plugins.\"io.containerd.service.v1.tasks-service\"]\n    rdt_config_file = \"\"\n\n  [plugins.\"io.containerd.snapshotter.v1.aufs\"]\n    root_path = \"\"\n\n  
[plugins.\"io.containerd.snapshotter.v1.btrfs\"]\n    root_path = \"\"\n\n  [plugins.\"io.containerd.snapshotter.v1.devmapper\"]\n    async_remove = false\n    base_image_size = \"\"\n    discard_blocks = false\n    fs_options = \"\"\n    fs_type = \"\"\n    pool_name = \"\"\n    root_path = \"\"\n\n  [plugins.\"io.containerd.snapshotter.v1.native\"]\n    root_path = \"\"\n\n  [plugins.\"io.containerd.snapshotter.v1.overlayfs\"]\n    root_path = \"\"\n    upperdir_label = false\n\n  [plugins.\"io.containerd.snapshotter.v1.zfs\"]\n    root_path = \"\"\n\n  [plugins.\"io.containerd.tracing.processor.v1.otlp\"]\n    endpoint = \"\"\n    insecure = false\n    protocol = \"\"\n\n[proxy_plugins]\n\n[stream_processors]\n\n  [stream_processors.\"io.containerd.ocicrypt.decoder.v1.tar\"]\n    accepts = [\"application/vnd.oci.image.layer.v1.tar+encrypted\"]\n    args = [\"--decryption-keys-path\", \"/etc/containerd/ocicrypt/keys\"]\n    env = [\"OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf\"]\n    path = \"ctd-decoder\"\n    returns = \"application/vnd.oci.image.layer.v1.tar\"\n\n  [stream_processors.\"io.containerd.ocicrypt.decoder.v1.tar.gzip\"]\n    accepts = [\"application/vnd.oci.image.layer.v1.tar+gzip+encrypted\"]\n    args = [\"--decryption-keys-path\", \"/etc/containerd/ocicrypt/keys\"]\n    env = [\"OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf\"]\n    path = \"ctd-decoder\"\n    returns = \"application/vnd.oci.image.layer.v1.tar+gzip\"\n\n[timeouts]\n  \"io.containerd.timeout.bolt.open\" = \"0s\"\n  \"io.containerd.timeout.shim.cleanup\" = \"5s\"\n  \"io.containerd.timeout.shim.load\" = \"5s\"\n  \"io.containerd.timeout.shim.shutdown\" = \"3s\"\n  \"io.containerd.timeout.task.state\" = \"2s\"\n\n[ttrpc]\n  address = \"\"\n  gid = 0\n  uid = 0\n"
  },
  {
    "path": "roles/deploy-env/files/daemon.json",
    "content": "{\n    \"data-root\": \"{{ docker.root_path }}\",\n    \"exec-opts\": [\"native.cgroupdriver=systemd\"],\n    \"log-driver\": \"json-file\",\n    \"log-opts\": {\n        \"max-size\": \"100m\"\n    },\n{% if registry_mirror is defined %}\n    \"registry-mirrors\": [\"{{ registry_mirror }}\"],\n{% endif %}\n{% if insecure_registries is defined %}\n    \"insecure-registries\": [\"{{ insecure_registries }}\"],\n{% endif %}\n    \"storage-driver\": \"overlay2\",\n    \"live-restore\": true\n}\n"
  },
  {
    "path": "roles/deploy-env/files/etc_default_kubelet.j2",
    "content": "KUBELET_EXTRA_ARGS=\"{% if overlay_network_setup %}--node-ip {{ overlay_network_bridge_ip }}{% endif %}\"\n"
  },
  {
    "path": "roles/deploy-env/files/hosts",
    "content": "127.0.0.1 localhost\n{{ ansible_default_ipv4['address'] }} {{ ansible_hostname }}\n{% if buildset_registry is defined and (buildset_registry.host | ipaddr) %}\n{{ buildset_registry.host }} zuul-jobs.buildset-registry\n{% endif %}\n"
  },
  {
    "path": "roles/deploy-env/files/hosts.toml",
    "content": "{% if item.skip_server is not defined or not item.skip_server %}\nserver = \"{{ item.server | default('https://' + item.namespace) }}\"\n{% endif %}\n\n[host.\"{{ item.mirror }}\"]\ncapabilities = [\"pull\", \"resolve\", \"push\"]\n{% if item.ca is defined %}\nca = \"{{ item.ca }}\"\n{% endif %}\n{% if item.skip_verify is defined and item.skip_verify %}\nskip_verify = true\n{% endif %}\n"
  },
  {
    "path": "roles/deploy-env/files/kubeadm_config.yaml.j2",
    "content": "---\napiVersion: kubeproxy.config.k8s.io/v1alpha1\nkind: KubeProxyConfiguration\nmode: ipvs\nipvs:\n  strictARP: true\n...\n---\napiVersion: kubeadm.k8s.io/v1beta4\nkind: ClusterConfiguration\nnetworking:\n  serviceSubnet: \"{{ kubeadm.service_cidr }}\"  # --service-cidr\n  podSubnet: \"{{ kubeadm.pod_network_cidr }}\"  # --pod-network-cidr\n  dnsDomain: \"cluster.local\"\napiServer:\n  ControlPlaneComponent:\n    extraArgs:\n      - name: kubelet-preferred-address-types\n        value: \"InternalIP,Hostname,InternalDNS,ExternalIP,ExternalDNS\"\n...\n---\napiVersion: kubeadm.k8s.io/v1beta4\nkind: InitConfiguration\nnodeRegistration:\n  criSocket: unix:///run/containerd/containerd.sock\n  taints: []\n  ignorePreflightErrors:\n    - NumCPU\nlocalAPIEndpoint:\n{% if overlay_network_setup %}\n  advertiseAddress: \"{{ overlay_network_prefix }}{{ (groups['all'] | sort).index(groups['k8s_control_plane'][0]) + 1 }}\"\n{% endif %}\n  bindPort: 6443\n...\n---\napiVersion: kubeadm.k8s.io/v1beta4\nkind: JoinConfiguration\nnodeRegistration:\n  criSocket: unix:///run/containerd/containerd.sock\n  taints: []\n  ignorePreflightErrors:\n    - NumCPU\n...\n"
  },
  {
    "path": "roles/deploy-env/files/loop-setup.service",
    "content": "[Unit]\nDescription=Setup loop devices\nDefaultDependencies=no\nConflicts=umount.target\nBefore=local-fs.target\nAfter=systemd-udevd.service\nRequires=systemd-udevd.service\n\n[Service]\nType=oneshot\nExecStart=/sbin/losetup {{ loopback_device }} '{{ loopback_image }}'\nExecStop=/sbin/losetup -d {{ loopback_device }}\nTimeoutSec=60\nRemainAfterExit=yes\n\n[Install]\nWantedBy=local-fs.target\nAlso=systemd-udevd.service\n"
  },
  {
    "path": "roles/deploy-env/files/nginx_tcp_proxy.conf",
    "content": "user  nginx;\nworker_processes  auto;\n\nerror_log  /dev/stdout warn;\npid        /var/run/nginx.pid;\n\nevents {\n    worker_connections  1024;\n}\n\nstream {\n    access_log off;\n\n    # Ingress controller proxy (for services not using Gateway API)\n    server {\n        listen {{ tcpproxy_ingress_openstack_cidr | ipaddr('address') }}:80;\n        proxy_pass {{ metallb_ingress_openstack_endpoint_cidr | ipaddr('address') }}:80;\n        proxy_bind {{ tcpproxy_ingress_openstack_cidr | ipaddr('address') }} transparent;\n    }\n\n    server {\n        listen {{ tcpproxy_ingress_openstack_cidr | ipaddr('address') }}:443;\n        proxy_pass {{ metallb_ingress_openstack_endpoint_cidr | ipaddr('address') }}:443;\n        proxy_bind {{ tcpproxy_ingress_openstack_cidr | ipaddr('address') }} transparent;\n    }\n\n    # Gateway API proxy (for services using HTTPRoute)\n    server {\n        listen {{ tcpproxy_gatewayapi_cidr | ipaddr('address') }}:80;\n        proxy_pass {{ metallb_gatewayapi_endpoint_cidr | ipaddr('address') }}:80;\n        proxy_bind {{ tcpproxy_gatewayapi_cidr | ipaddr('address') }} transparent;\n    }\n\n    server {\n        listen {{ tcpproxy_gatewayapi_cidr | ipaddr('address') }}:443;\n        proxy_pass {{ metallb_gatewayapi_endpoint_cidr | ipaddr('address') }}:443;\n        proxy_bind {{ tcpproxy_gatewayapi_cidr | ipaddr('address') }} transparent;\n    }\n}\n"
  },
  {
    "path": "roles/deploy-env/files/resolv.conf",
    "content": "nameserver {{ nameserver_ip }}\n"
  },
  {
    "path": "roles/deploy-env/files/ssh_config",
    "content": "StrictHostKeyChecking no\n"
  },
  {
    "path": "roles/deploy-env/handlers/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Systemd reload\n  shell: systemctl daemon-reload\n\n- name: Restart loop-setup\n  service:\n    name: loop-setup\n    state: started\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/buildset_registry_alias.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Set buildset_registry alias variable when using ip\n  set_fact:\n    buildset_registry_alias: zuul-jobs.buildset-registry\n  when:\n    - buildset_registry.host | ipaddr\n\n- name: Set buildset_registry alias variable when using name\n  set_fact:\n    buildset_registry_alias: \"{{ buildset_registry.host }}\"\n  when:\n    - not ( buildset_registry.host | ipaddr )\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/calico.yaml",
    "content": "---\n# We download Calico manifest on all nodes because we then want to download\n# Calico images BEFORE deploying it, so that `kubectl wait` timeout\n# for `k8s-app=kube-dns` isn't reached by slow download speeds\n- name: Prepare Calico configuration\n  become: false\n  block:\n    - name: Download Calico manifest\n      get_url:\n        url: \"{{ calico_manifest_url }}\"\n        dest: /tmp/calico.yaml\n      register: calico_config\n\n    - name: Change registry URL\n      shell: sed -i -e 's#docker.io/calico/#quay.io/calico/#g' {{ calico_config.dest }}\n\n    - name: Amend settings for VXLAN setup\n      when: overlay_network_setup\n      shell: sed -i '/CALICO_IPV4POOL_IPIP/{n;s/Always/Never/}' {{ calico_config.dest }}\n\n- name: Download Calico images on K8s nodes beforehand\n  when: inventory_hostname in (groups['k8s_cluster'] | default([]))\n  shell: awk '/image:/ { print $2 }' {{ calico_config.dest }} | xargs -I{} crictl pull {}\n  environment:\n    CONTAINER_RUNTIME_ENDPOINT: \"unix:///run/containerd/containerd.sock\"\n    IMAGE_SERVICE_ENDPOINT: \"unix:///run/containerd/containerd.sock\"\n  args:\n    executable: /bin/bash\n\n- name: Deploy Calico from Control Plane node\n  become: false\n  when: inventory_hostname in (groups['primary'] | default([]))\n  block:\n    - name: Deploy Calico\n      command: kubectl apply -f {{ calico_config.dest }}\n\n    - name: Sleep before trying to check Calico pods\n      pause:\n        seconds: 30\n\n    - name: Wait for Calico pods ready\n      command: kubectl -n kube-system wait --timeout=20s --for=condition=Ready pods -l k8s-app=calico-node\n      register: calico_pods_wait\n      until: calico_pods_wait is succeeded\n      retries: 10\n\n    - name: Prepare Calico patch\n      copy:\n        src: files/calico_patch.yaml\n        dest: /tmp/calico_patch.yaml\n      register: calico_patch_config\n\n    - name: Patch Calico\n      command: kubectl -n kube-system patch daemonset calico-node 
--patch-file {{ calico_patch_config.dest }}\n\n    - name: Delete Calico pods (for hard restart)\n      command: kubectl -n kube-system delete pods -l k8s-app=calico-node\n\n    - name: Wait for Calico pods ready (after patch)\n      command: kubectl -n kube-system wait --timeout=20s --for=condition=Ready pods -l k8s-app=calico-node\n      register: calico_pods_wait\n      until: calico_pods_wait is succeeded\n      retries: 10\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/cilium.yaml",
    "content": "---\n- name: Download Cilium\n  shell: |\n    CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)\n    CLI_ARCH=amd64\n    curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}\n    sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum\n    tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin\n    rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}\n  args:\n    executable: /bin/bash\n    chdir: /tmp\n  when: inventory_hostname in (groups['primary'] | default([]))\n\n- name: Deploy Cilium\n  become: false\n  shell: |\n    cilium install --version {{ cilium_version }}\n  args:\n    executable: /bin/bash\n  when: inventory_hostname in (groups['primary'] | default([]))\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/client_cluster_ssh.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Set client user home directory\n  set_fact:\n    client_user_home_directory: /home/{{ client_ssh_user }}\n  when: client_ssh_user != \"root\"\n\n- name: Set client user home directory\n  set_fact:\n    client_user_home_directory: /root\n  when: client_ssh_user == \"root\"\n\n- name: Set cluster user home directory\n  set_fact:\n    cluster_user_home_directory: /home/{{ cluster_ssh_user }}\n  when: cluster_ssh_user != \"root\"\n\n- name: Set cluster user home directory\n  set_fact:\n    cluster_user_home_directory: /root\n  when: cluster_ssh_user == \"root\"\n\n- name: Setup ssh keys\n  become_user: \"{{ client_ssh_user }}\"\n  block:\n    - name: Generate ssh key pair\n      shell: |\n        ssh-keygen -t ed25519 -q -N \"\" -f {{ client_user_home_directory }}/.ssh/id_ed25519\n      args:\n        creates: \"{{ client_user_home_directory }}/.ssh/id_ed25519.pub\"\n      when: (inventory_hostname in (groups['primary'] | default([])))\n\n    - name: Read ssh public key\n      command: cat \"{{ client_user_home_directory }}/.ssh/id_ed25519.pub\"\n      register: ssh_public_key\n      when: (inventory_hostname in (groups['primary'] | default([])))\n\n- name: Setup passwordless ssh from primary and cluster nodes\n  become_user: \"{{ cluster_ssh_user }}\"\n  block:\n    - name: Set primary ssh public key\n      set_fact:\n        client_ssh_public_key: \"{{ (groups['primary'] | map('extract', 
hostvars, ['ssh_public_key', 'stdout']))[0] }}\"\n      when: inventory_hostname in (groups['k8s_cluster'] | default([]))\n\n    - name: Put keys to .ssh/authorized_keys\n      lineinfile:\n        path: \"{{ cluster_user_home_directory }}/.ssh/authorized_keys\"\n        state: present\n        line: \"{{ client_ssh_public_key }}\"\n      when: inventory_hostname in (groups['k8s_cluster'] | default([]))\n\n    - name: Disable strict host key checking\n      template:\n        src: \"files/ssh_config\"\n        dest: \"{{ client_user_home_directory }}/.ssh/config\"\n        owner: \"{{ client_ssh_user }}\"\n        mode: 0644\n        backup: true\n      when: (inventory_hostname in (groups['primary'] | default([])))\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/client_cluster_tunnel.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Set cluster IP\n  set_fact:\n    cluster_default_ip: \"{{ (groups['k8s_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']))[0] }}\"\n\n- name: Set client IP\n  set_fact:\n    client_default_ip: \"{{ (groups['primary'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']))[0] }}\"\n\n- name: Setup wireguard keys\n  when: (groups['primary'] | difference(groups['k8s_control_plane']) | length > 0)\n  block:\n    - name: Generate wireguard key pair\n      shell: |\n        wg genkey | tee /root/wg-private-key | wg pubkey > /root/wg-public-key\n        chmod 600 /root/wg-private-key\n      when: (inventory_hostname in (groups['primary'] | default([]))) or (inventory_hostname in (groups['k8s_control_plane'] | default([])))\n\n    - name: Register public wireguard key variable\n      command: cat /root/wg-public-key\n      register: wg_public_key\n      when: (inventory_hostname in (groups['primary'] | default([]))) or (inventory_hostname in (groups['k8s_control_plane'] | default([])))\n\n- name: Setup wireguard tunnel between primary and cluster control-plane node\n  when: (groups['primary'] | difference(groups['k8s_control_plane']) | length > 0)\n  block:\n    - name: Set primary wireguard public key\n      set_fact:\n        client_wg_public_key: \"{{ (groups['primary'] | map('extract', hostvars, ['wg_public_key', 'stdout']))[0] }}\"\n      when: 
inventory_hostname in (groups['k8s_control_plane'] | default([]))\n\n    - name: Set cluster wireguard public key\n      set_fact:\n        cluster_wg_public_key: \"{{ (groups['k8s_control_plane'] | map('extract', hostvars, ['wg_public_key', 'stdout']))[0] }}\"\n      when: inventory_hostname in (groups['primary'] | default([]))\n\n    - name: Set up wireguard tunnel on cluster control-plane node\n      shell: |\n        cat > /tmp/configure_cluster_tunnel.sh <<EOF\n        ip link add client-wg type wireguard\n        ip addr add {{ tunnel_cluster_cidr }} dev client-wg\n        wg set client-wg listen-port 51820 private-key /root/wg-private-key peer {{ client_wg_public_key }} allowed-ips {{ tunnel_network_cidr }} endpoint {{ client_default_ip }}:51820\n        ip link set client-wg up\n        iptables -t filter -P FORWARD ACCEPT\n        iptables -t filter -I FORWARD -o client-wg -j ACCEPT\n        EOF\n        chmod +x /tmp/configure_cluster_tunnel.sh\n        /tmp/configure_cluster_tunnel.sh\n      when: inventory_hostname in (groups['k8s_control_plane'] | default([]))\n\n    - name: Set up wireguard tunnel on primary node\n      shell: |\n        cat > /tmp/configure_client_tunnel.sh <<EOF\n        ip link add client-wg type wireguard\n        ip addr add {{ tunnel_client_cidr }} dev client-wg\n        wg set client-wg listen-port 51820 private-key /root/wg-private-key peer {{ cluster_wg_public_key }} allowed-ips {{ tunnel_network_cidr }},{{ openstack_provider_network_cidr }},{{ metallb_pool_cidr }} endpoint {{ cluster_default_ip }}:51820\n        ip link set client-wg up\n        ip route add {{ metallb_pool_cidr }} via {{ tunnel_cluster_cidr | ipaddr('address') }} dev client-wg\n        ip route add {{ openstack_provider_network_cidr }} via {{ tunnel_cluster_cidr | ipaddr('address') }} dev client-wg\n        EOF\n        chmod +x /tmp/configure_client_tunnel.sh\n        /tmp/configure_client_tunnel.sh\n      when: inventory_hostname in (groups['primary'] | 
default([]))\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/containerd.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Remove old docker packages\n  apt:\n    pkg:\n      - docker.io\n      - docker-doc\n      - docker-compose\n      - podman-docker\n      - containerd\n      - runc\n    state: absent\n\n- name: Ensure APT keyrings directory exists\n  file:\n    path: /etc/apt/keyrings\n    state: directory\n    mode: \"0755\"\n\n- name: Add Docker apt repository key\n  get_url:\n    url: https://download.docker.com/linux/ubuntu/gpg\n    dest: /etc/apt/keyrings/docker.asc\n    mode: \"0644\"\n\n- name: Get dpkg arch\n  command: dpkg --print-architecture\n  register: dpkg_architecture\n\n- name: Add Docker apt repository\n  apt_repository:\n    repo: \"deb [arch={{ dpkg_architecture.stdout }} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable\"\n    state: present\n    filename: docker\n\n- name: Install docker packages\n  apt:\n    pkg:\n      - docker-ce\n      - docker-ce-cli\n      - containerd.io\n      - docker-buildx-plugin\n      - docker-compose-plugin\n    state: present\n    update_cache: true\n\n- name: Ensure docker group exists\n  group:\n    name: docker\n    state: present\n\n- name: Add users to docker group\n  command: \"adduser {{ item }} docker\"\n  loop: \"{{ docker_users }}\"\n\n- name: Reset ssh connection to apply user changes.\n  meta: reset_connection\n\n- name: Install Crictl\n  shell: |\n    wget 
https://github.com/kubernetes-sigs/cri-tools/releases/download/{{crictl_version}}/crictl-{{crictl_version}}-linux-amd64.tar.gz\n    sudo tar zxvf crictl-{{crictl_version}}-linux-amd64.tar.gz -C /usr/local/bin\n    rm -f crictl-{{crictl_version}}-linux-amd64.tar.gz\n  args:\n    executable: /bin/bash\n\n- name: Set registry_mirror fact\n  when:\n    - registry_mirror is not defined\n    - zuul_site_mirror_fqdn is defined\n  set_fact:\n    registry_mirror: \"http://{{ zuul_site_mirror_fqdn }}:8082\"\n\n- name: Set insecure_registries fact for Docker\n  when:\n    - insecure_registries is not defined\n    - zuul_site_mirror_fqdn is defined\n  set_fact:\n    insecure_registries: \"{{ zuul_site_mirror_fqdn }}:8082\"\n\n- name: Set registry_namespaces fact\n  set_fact:\n    registry_namespaces:\n      - namespace: \"_default\"\n        mirror: \"{{ registry_mirror }}\"\n        skip_server: true\n        skip_verify: true\n  when: registry_mirror is defined\n\n- name: Init registry_namespaces if not defined\n  set_fact:\n    registry_namespaces: \"[]\"\n  when: not registry_namespaces is defined\n\n- name: Buildset registry namespace\n  when: buildset_registry is defined\n  block:\n    - name: Buildset registry alias\n      include_tasks:\n        file: buildset_registry_alias.yaml\n\n    - name: Write buildset registry TLS certificate\n      copy:\n        content: \"{{ buildset_registry.cert }}\"\n        dest: \"/usr/local/share/ca-certificates/{{ buildset_registry_alias }}.crt\"\n        mode: 0644\n      register: buildset_registry_tls_ca\n\n    - name: Update CA certs\n      command: \"update-ca-certificates\"\n      when: buildset_registry_tls_ca is changed\n\n    - name: Set buildset registry namespace\n      set_fact:\n        buildset_registry_namespace:\n          namespace: '{{ buildset_registry_alias }}:{{ buildset_registry.port }}'\n          mirror: 'https://{{ buildset_registry_alias }}:{{ buildset_registry.port }}'\n          ca: 
\"/usr/local/share/ca-certificates/{{ buildset_registry_alias }}.crt\"\n          auth: \"{{ (buildset_registry.username + ':' + buildset_registry.password) | b64encode }}\"\n\n    - name: Append buildset_registry to registry namespaces\n      when:\n        - buildset_registry_namespace is defined\n        - registry_namespaces is defined\n      set_fact:\n        registry_namespaces: \"{{ registry_namespaces + [ buildset_registry_namespace ] }}\"\n\n- name: Configure containerd\n  template:\n    src: files/containerd_config.toml\n    dest: /etc/containerd/config.toml\n\n- name: Create containerd config directory hierarchy\n  file:\n    state: directory\n    path: /etc/containerd/certs.d\n\n- name: Create host namespace directory\n  file:\n    state: directory\n    path: \"/etc/containerd/certs.d/{{ item.namespace }}\"\n  loop: \"{{ registry_namespaces }}\"\n\n- name: Create hosts.toml file\n  template:\n    src: files/hosts.toml\n    dest: \"/etc/containerd/certs.d/{{ item.namespace }}/hosts.toml\"\n  loop: \"{{ registry_namespaces }}\"\n\n- name: Restart containerd\n  service:\n    name: containerd\n    daemon_reload: yes\n    state: restarted\n\n- name: Configure Docker daemon\n  template:\n    src: files/daemon.json\n    dest: /etc/docker/daemon.json\n\n- name: Restart docker\n  service:\n    name: docker\n    daemon_reload: yes\n    state: restarted\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/coredns_resolver.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Enable recursive queries for coredns\n  become: false\n  shell: |\n    tee > /tmp/coredns_configmap.yaml <<EOF\n    apiVersion: v1\n    kind: ConfigMap\n    metadata:\n      name: coredns\n      namespace: kube-system\n    data:\n      Corefile: |\n        .:53 {\n            errors\n            health {\n              lameduck 5s\n            }\n            header {\n                response set ra\n            }\n            ready\n            kubernetes cluster.local in-addr.arpa ip6.arpa {\n              pods insecure\n              fallthrough in-addr.arpa ip6.arpa\n              ttl 30\n            }\n            prometheus :9153\n            forward . 
{{ coredns_dns_server }} {\n              max_concurrent 1000\n            }\n            cache 30\n            loop\n            reload\n            loadbalance\n        }\n        {{ coredns_extra_config | indent(4) }}\n    EOF\n    kubectl apply -f /tmp/coredns_configmap.yaml\n    kubectl rollout restart -n kube-system deployment/coredns\n    kubectl rollout status -n kube-system deployment/coredns\n  when: inventory_hostname in (groups['primary'] | default([]))\n\n- name: Give coredns time to restart\n  pause:\n    seconds: 30\n  when: inventory_hostname in (groups['primary'] | default([]))\n\n- name: Get coredns rollout restart status\n  become: false\n  shell: |\n    kubectl rollout status -n kube-system deployment/coredns\n  when: inventory_hostname in (groups['primary'] | default([]))\n\n- name: Use coredns as default DNS resolver\n  copy:\n    src: files/cluster_resolv.conf\n    dest: /etc/resolv.conf\n    owner: root\n    group: root\n    mode: 0644\n  when: inventory_hostname in (groups['k8s_cluster'] | default([]))\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/env_inventory.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Set cluster device\n  set_fact:\n    default_dev: \"{{ hostvars[inventory_hostname]['ansible_default_ipv4']['interface'] }}\"\n\n- name: Stats\n  shell: |\n    echo {{ default_dev }} > /tmp/inventory_default_dev.txt\n\n    echo -n > /tmp/inventory_k8s_control_plane.txt\n    {% for host in (groups['k8s_control_plane'] | default([])) %}\n      echo {{ hostvars[host].ansible_hostname }} >> /tmp/inventory_k8s_control_plane.txt\n    {% endfor %}\n\n    echo -n > /tmp/inventory_k8s_nodes.txt\n    {% for host in (groups['k8s_nodes'] | default([])) %}\n      echo {{ hostvars[host].ansible_hostname }} >> /tmp/inventory_k8s_nodes.txt\n    {% endfor %}\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/flannel.yaml",
    "content": "---\n- name: Add Flannel Helm repo\n  become_user: \"{{ kubectl.user }}\"\n  when: inventory_hostname in (groups['primary'] | default([]))\n  block:\n    - name: Add Flannel chart repo\n      shell: |\n        helm repo add flannel https://flannel-io.github.io/flannel/\n\n    - name: Install Flannel\n      shell: |\n        helm upgrade --install flannel flannel/flannel \\\n          --version {{ flannel_version }} \\\n          --namespace kube-flannel \\\n          --create-namespace \\\n          --set podCidr=\"{{ kubeadm.pod_network_cidr }}\"\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/floating_network.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Set cluster device\n  set_fact:\n    # cluster_default_dev: \"{{ (groups['k8s_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'interface']))[0] }}\"\n    cluster_default_dev: \"{{ hostvars[inventory_hostname]['ansible_default_ipv4']['interface'] }}\"\n\n- name: Set up TAP interface on cluster control-plane node\n  shell: |\n    ip tuntap add name provider1 mode tap\n    ip link set provider1 up\n    ip addr add {{ floating_network_gateway_cidr }} dev provider1\n\n- name: Set up SNAT for packets going outside the cluster\n  shell: |\n    iptables -t nat -A POSTROUTING -o {{ cluster_default_dev }} -s {{ floating_network_cidr }} -j MASQUERADE\n\n- name: Set up FORWARD for packets going from VMs\n  shell: |\n    iptables -t filter -I FORWARD -s {{ floating_network_cidr }} -j ACCEPT\n\n\n- name: Set up tcp proxy TAP interfaces on cluster control-plane node\n  shell: |\n    ip tuntap add name tcpproxy1 mode tap\n    ip link set tcpproxy1 up\n    ip addr add {{ tcpproxy_gatewayapi_cidr }} dev tcpproxy1\n    ip addr add {{ tcpproxy_ingress_openstack_cidr }} dev tcpproxy1\n\n# We use tcp proxy to forward traffic to make it possible to connect\n# to the Openstack public endpoint (managed by Metallb) from VMs.\n- name: Setup TCP proxy\n  when: metallb_setup\n  block:\n    - name: Prepare nginx tcp proxy config\n      template:\n        src: files/nginx_tcp_proxy.conf\n        
dest: /tmp/nginx_tcp_proxy.conf\n        owner: root\n        group: root\n        mode: 0644\n\n    - name: Start provider network tcp proxy\n      docker_container:\n        name: nginx_tcp_proxy\n        image: \"{{ nginx_image }}\"\n        network_mode: host\n        capabilities:\n          - NET_ADMIN\n          - NET_RAW\n        mounts:\n          - source: /tmp/nginx_tcp_proxy.conf\n            target: /etc/nginx/nginx.conf\n            type: bind\n        entrypoint: nginx\n        command: |\n          -g 'daemon off;'\n        state: started\n        recreate: yes\n\n- name: Start provider network dnsmasq\n  docker_container:\n    name: provider_dnsmasq\n    image: \"{{ dnsmasq_image }}\"\n    network_mode: host\n    capabilities:\n      - NET_ADMIN\n    entrypoint: dnsmasq\n    command: >-\n      --keep-in-foreground\n      --no-hosts\n      --bind-interfaces\n      --address=\"/openstack-helm.org/{{ tcpproxy_gatewayapi_cidr | ipaddr('address') }}\"\n      --listen-address=\"{{ floating_network_gateway_cidr | ipaddr('address') }}\"\n      --no-resolv\n      --server={{ dnsmasq_dns_server }}\n      {{ dnsmasq_extra_args | default('') }}\n    state: started\n    recreate: yes\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/gatewayapi_envoy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Deploy Envoy Gateway\n  become: false\n  block:\n    - name: Install Envoy Gateway\n      become_user: \"{{ kubectl.user }}\"\n      shell: |\n        helm upgrade --install envoy-gateway oci://docker.io/envoyproxy/gateway-helm \\\n          --version {{ gatewayapi_envoy_version }} \\\n          --namespace envoy-gateway-system \\\n          --create-namespace \\\n          --wait\n\n    - name: Sleep before checking Envoy Gateway pods\n      pause:\n        seconds: 30\n\n    - name: Wait for Envoy Gateway pods to be ready\n      command: kubectl -n envoy-gateway-system wait --timeout=5m --for=condition=Available deployment/envoy-gateway\n\n    - name: Create GatewayClass for Envoy Gateway\n      shell: |\n        tee > /tmp/gatewayapi_envoy_class.yaml <<EOF\n        ---\n        apiVersion: gateway.networking.k8s.io/v1\n        kind: GatewayClass\n        metadata:\n          name: default\n        spec:\n          controllerName: gateway.envoyproxy.io/gatewayclass-controller\n        EOF\n        kubectl apply -f /tmp/gatewayapi_envoy_class.yaml\n"
  },
  {
    "path": "roles/deploy-env/tasks/ingress_haproxy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Add haproxy-ingress helm repo\n  become_user: \"{{ kubectl.user }}\"\n  shell: |\n    helm repo add haproxy-ingress https://haproxy-ingress.github.io/charts\n\n- name: Deploy HAProxy ingress in openstack namespace\n  become_user: \"{{ kubectl.user }}\"\n  when: ingress_openstack_setup\n  shell: |\n    helm upgrade --install --create-namespace haproxy-ingress-openstack haproxy-ingress/haproxy-ingress \\\n      --version {{ ingress_haproxy_version }} \\\n      --namespace=openstack \\\n      --set controller.kind=Deployment \\\n      --set controller.ingressClassResource.enabled=\"true\" \\\n      --set controller.ingressClass=ingress-openstack \\\n      --set controller.podLabels.app=ingress-api\n\n- name: Deploy HAProxy ingress in ceph namespace\n  become_user: \"{{ kubectl.user }}\"\n  when: ingress_ceph_setup\n  shell: |\n    helm upgrade --install --create-namespace haproxy-ingress-ceph haproxy-ingress/haproxy-ingress \\\n      --version {{ ingress_haproxy_version }} \\\n      --namespace=ceph \\\n      --set controller.kind=Deployment \\\n      --set controller.ingressClassResource.enabled=\"true\" \\\n      --set controller.ingressClass=ingress-ceph \\\n      --set controller.podLabels.app=ingress-api\n\n- name: Deploy HAProxy ingress in osh-infra namespace\n  become_user: \"{{ kubectl.user }}\"\n  when: ingress_osh_infra_setup\n  shell: |\n    helm upgrade --install 
--create-namespace haproxy-ingress-osh-infra haproxy-ingress/haproxy-ingress \\\n      --version {{ ingress_haproxy_version }} \\\n      --namespace=osh-infra \\\n      --set controller.kind=Deployment \\\n      --set controller.ingressClassResource.enabled=\"true\" \\\n      --set controller.ingressClass=ingress-osh-infra \\\n      --set controller.podLabels.app=ingress-api\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/ingress_nginx.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Add ingress-nginx helm repo\n  become_user: \"{{ kubectl.user }}\"\n  shell: |\n    helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx\n\n- name: Deploy cluster ingress-nginx in kube-system namespace if not using metallb\n  become_user: \"{{ kubectl.user }}\"\n  when: not metallb_setup\n  shell: |\n    helm upgrade --install ingress-nginx-cluster ingress-nginx/ingress-nginx \\\n      --version {{ ingress_nginx_version }} \\\n      --namespace=kube-system \\\n      --set controller.admissionWebhooks.enabled=\"false\" \\\n      --set controller.kind=DaemonSet \\\n      --set controller.service.type=ClusterIP \\\n      --set controller.scope.enabled=\"false\" \\\n      --set controller.hostNetwork=\"true\" \\\n      --set controller.ingressClassResource.name=ingress-cluster \\\n      --set controller.ingressClassResource.controllerValue=\"k8s.io/ingress-nginx-cluster\" \\\n      --set controller.ingressClassResource.default=\"true\" \\\n      --set controller.ingressClass=ingress-cluster \\\n      --set controller.labels.app=ingress-api\n\n- name: Deploy ingress-nginx in openstack namespace\n  become_user: \"{{ kubectl.user }}\"\n  when: ingress_openstack_setup\n  shell: |\n    helm upgrade --install --create-namespace ingress-nginx-openstack ingress-nginx/ingress-nginx \\\n      --version {{ ingress_nginx_version }} \\\n      --namespace=openstack \\\n      --set 
controller.kind=DaemonSet \\\n      --set controller.admissionWebhooks.enabled=\"false\" \\\n      --set controller.scope.enabled=\"true\" \\\n      --set controller.service.enabled=\"false\" \\\n      --set controller.ingressClassResource.name=ingress-openstack \\\n      --set controller.ingressClassResource.controllerValue=\"k8s.io/ingress-nginx-openstack\" \\\n      --set controller.ingressClass=ingress-openstack \\\n      --set controller.labels.app=ingress-api \\\n      --set controller.allowSnippetAnnotations=true\n\n- name: Deploy ingress-nginx in ceph namespace\n  become_user: \"{{ kubectl.user }}\"\n  when: ingress_ceph_setup\n  shell: |\n    helm upgrade --install --create-namespace ingress-nginx-ceph ingress-nginx/ingress-nginx \\\n      --version {{ ingress_nginx_version }} \\\n      --namespace=ceph \\\n      --set controller.kind=DaemonSet \\\n      --set controller.admissionWebhooks.enabled=\"false\" \\\n      --set controller.scope.enabled=\"true\" \\\n      --set controller.service.enabled=\"false\" \\\n      --set controller.ingressClassResource.name=ingress-ceph \\\n      --set controller.ingressClassResource.controllerValue=\"k8s.io/ingress-nginx-ceph\" \\\n      --set controller.ingressClass=ingress-ceph \\\n      --set controller.labels.app=ingress-api \\\n      --set controller.allowSnippetAnnotations=true\n\n- name: Deploy ingress-nginx in osh-infra namespace\n  become_user: \"{{ kubectl.user }}\"\n  when: ingress_osh_infra_setup\n  shell: |\n    helm upgrade --install --create-namespace ingress-nginx-osh-infra ingress-nginx/ingress-nginx \\\n      --version {{ ingress_nginx_version }} \\\n      --namespace=osh-infra \\\n      --set controller.admissionWebhooks.enabled=\"false\" \\\n      --set controller.scope.enabled=\"true\" \\\n      --set controller.service.enabled=\"false\" \\\n      --set controller.ingressClassResource.name=ingress-osh-infra \\\n      --set 
controller.ingressClassResource.controllerValue=\"k8s.io/ingress-nginx-osh-infra\" \\\n      --set controller.ingressClass=ingress-osh-infra \\\n      --set controller.labels.app=ingress-api \\\n      --set controller.allowSnippetAnnotations=true\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/k8s_client.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Install Kubectl\n  apt:\n    state: present\n    update_cache: true\n    allow_downgrade: true\n    pkg:\n      - \"kubectl={{ kube_version }}\"\n\n- name: Set user home directory\n  set_fact:\n    user_home_directory: /home/{{ kubectl.user }}\n  when: kubectl.user != \"root\"\n\n- name: Set root home directory\n  set_fact:\n    user_home_directory: /root\n  when: kubectl.user == \"root\"\n\n- name: \"Setup kubeconfig directory for {{ kubectl.user }} user\"\n  shell: |\n    mkdir -p {{ user_home_directory }}/.kube\n\n- name: \"Copy kube_config file for {{ kubectl.user }} user\"\n  synchronize:\n    src: /tmp/kube_config\n    dest: \"{{ user_home_directory }}/.kube/config\"\n\n- name: \"Set kubconfig file ownership for {{ kubectl.user }} user\"\n  shell: |\n    chown -R {{ kubectl.user }}:{{ kubectl.group }} {{ user_home_directory }}/.kube\n\n- name: Deploy Helm\n  block:\n    - name: Install Helm\n      include_role:\n        name: ensure-helm\n\n    - name: Install osh helm plugin\n      become_user: \"{{ kubectl.user }}\"\n      shell: |\n        helm plugin install {{ osh_plugin_repo }}\n\n    # This is to improve build time\n    - name: Remove stable Helm repo\n      become_user: \"{{ kubectl.user }}\"\n      command: helm repo remove stable\n      ignore_errors: true\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/k8s_common.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Load necessary modules\n  modprobe:\n    name: \"{{ item }}\"\n    state: present\n  with_items:\n    - overlay\n    - br_netfilter\n\n- name: Configure sysctl\n  sysctl:\n    name: \"{{ item }}\"\n    value: \"1\"\n    state: present\n  loop:\n    - net.ipv6.conf.default.disable_ipv6\n    - net.ipv6.conf.all.disable_ipv6\n    - net.ipv6.conf.lo.disable_ipv6\n    - net.bridge.bridge-nf-call-iptables\n    - net.bridge.bridge-nf-call-ip6tables\n    - net.ipv4.ip_forward\n  ignore_errors: true\n\n# This is necessary when we run dnsmasq.\n# Otherwise, we get the error:\n# failed to create inotify: Too many open files\n- name: Configure number of inotify instances\n  sysctl:\n    name: \"fs.inotify.max_user_instances\"\n    value: \"256\"\n    state: present\n  ignore_errors: true\n\n- name: Configure number of inotify instances\n  sysctl:\n    name: \"{{ item }}\"\n    value: \"0\"\n    state: present\n  loop:\n    - net.ipv4.conf.all.rp_filter\n    - net.ipv4.conf.default.rp_filter\n  ignore_errors: true\n\n- name: Remove swapfile from /etc/fstab\n  mount:\n    name: \"{{ item }}\"\n    fstype: swap\n    state: absent\n  with_items:\n    - swap\n    - none\n\n- name: Disable swap\n  command: swapoff -a\n  when: ansible_swaptotal_mb > 0\n\n- name: Install Kubernetes binaries\n  apt:\n    state: present\n    update_cache: true\n    allow_downgrade: true\n    pkg:\n      - \"kubelet={{ 
kube_version }}\"\n      - \"kubeadm={{ kube_version }}\"\n      - \"kubectl={{ kube_version }}\"\n\n- name: Restart kubelet\n  service:\n    name: kubelet\n    daemon_reload: yes\n    state: restarted\n\n- name: Configure resolv.conf\n  template:\n    src: files/resolv.conf\n    dest: /etc/resolv.conf\n    owner: root\n    group: root\n    mode: 0644\n  vars:\n    nameserver_ip: \"8.8.8.8\"\n\n- name: Disable systemd-resolved\n  service:\n    name: systemd-resolved\n    enabled: false\n    state: stopped\n  ignore_errors: true\n\n- name: Disable unbound\n  service:\n    name: unbound\n    enabled: false\n    state: stopped\n  ignore_errors: true\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/k8s_control_plane.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Mount tmpfs to /var/lib/etcd\n  mount:\n    path: /var/lib/etcd\n    src: tmpfs\n    fstype: tmpfs\n    opts: size=1g\n    state: mounted\n\n- name: Prepare kubeadm config\n  template:\n    src: files/kubeadm_config.yaml.j2\n    dest: /tmp/kubeadm_config.yaml\n\n- name: Initialize the Kubernetes cluster using kubeadm\n  command: kubeadm init --config /tmp/kubeadm_config.yaml\n\n- name: Generate join command\n  command: kubeadm token create --print-join-command\n  register: join_command\n\n- name: \"Copy kube config to localhost\"\n  synchronize:\n    mode: pull\n    src: /etc/kubernetes/admin.conf\n    dest: /tmp/kube_config\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/loopback_devices.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Create loop device image\n  shell: |\n    mkdir -p {{ loopback_image | dirname }}\n    truncate -s {{ loopback_image_size }} {{ loopback_image }}\n\n- name: Create loop device\n  shell: |\n    mknod {{ loopback_device }} b $(grep loop /proc/devices | cut -c3) {{ loopback_device | regex_search('[0-9]+') }}\n\n- name: Create loop-setup systemd unit\n  template:\n    src: files/loop-setup.service\n    dest: /etc/systemd/system/loop-setup.service\n  notify:\n    - Systemd reload\n\n- name: Systemd reload\n  shell: systemctl daemon-reload\n\n- name: Configure loop-setup systemd unit\n  service:\n    name: loop-setup\n    enabled: yes\n    state: started\n  notify:\n    - Systemd reload\n    - Restart loop-setup\n\n- name: Check {{ loopback_device }} is attached\n  shell: |\n    losetup | grep -i {{ loopback_device }}\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/loopback_devices_mount.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Format loop device\n  filesystem:\n    fstype: \"{{ loopback_format_fs_type }}\"\n    dev: \"{{ loopback_device }}\"\n  when: loopback_format\n\n- name: Mount loop device\n  mount:\n    src: \"{{ loopback_device }}\"\n    path: \"{{ loopback_mount_path }}\"\n    fstype: \"{{ loopback_format_fs_type }}\"\n    state: mounted\n  when: loopback_mount\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Include prerequisites tasks\n  include_tasks:\n    file: prerequisites.yaml\n\n- name: Overlay network\n  include_tasks:\n    file: overlay.yaml\n  when: overlay_network_setup\n\n- name: Configure /etc/hosts\n  template:\n    src: files/hosts\n    dest: /etc/hosts\n\n- name: Loop devices\n  include_tasks:\n    file: loopback_devices.yaml\n  when: loopback_setup and inventory_hostname in (groups['k8s_cluster'] | default([]))\n\n- name: Loop device mount\n  include_tasks:\n    file: loopback_devices_mount.yaml\n  when: loopback_setup and inventory_hostname in (groups['k8s_cluster'] | default([]))\n\n- name: Deploy Containerd\n  include_tasks:\n    file: containerd.yaml\n\n- name: Include K8s common tasks\n  include_tasks:\n    file: k8s_common.yaml\n  when: inventory_hostname in (groups['k8s_cluster'] | default([]))\n\n- name: Include K8s control-plane tasks\n  include_tasks:\n    file: k8s_control_plane.yaml\n  when: inventory_hostname in (groups['k8s_control_plane'] | default([]))\n\n- name: Prepare kubeadm config\n  template:\n    src: files/etc_default_kubelet.j2\n    dest: /etc/default/kubelet\n  when: inventory_hostname in (groups['k8s_cluster'] | default([]))\n\n- name: Join workload nodes to cluster\n  command: \"{{ (groups['k8s_control_plane'] | map('extract', hostvars, ['join_command', 'stdout_lines', 0]))[0] }}\"\n  when: inventory_hostname in (groups['k8s_nodes'] | 
default([]))\n\n- name: Include K8s client tasks\n  include_tasks:\n    file: k8s_client.yaml\n  when: inventory_hostname in (groups['primary'] | default([]))\n\n- name: Include Calico tasks\n  include_tasks:\n    file: calico.yaml\n  when: calico_setup\n\n- name: Include Cilium tasks\n  include_tasks:\n    file: cilium.yaml\n  when: cilium_setup\n\n- name: Include Flannel tasks\n  include_tasks:\n    file: flannel.yaml\n  when: flannel_setup\n\n- name: Include coredns resolver tasks\n  include_tasks:\n    file: coredns_resolver.yaml\n  when: coredns_resolver_setup\n\n- name: Include floating network tasks\n  include_tasks:\n    file: floating_network.yaml\n  when:\n    - floating_network_setup\n    - inventory_hostname in (groups['k8s_control_plane'] | default([]))\n\n- name: Include Metallb tasks\n  include_tasks:\n    file: metallb.yaml\n  when: metallb_setup\n\n- name: Include client-to-cluster tunnel tasks\n  include_tasks:\n    file: client_cluster_tunnel.yaml\n  when: (groups['primary'] | difference(groups['k8s_control_plane']) | length > 0)\n\n- name: Include client-to-cluster ssh key tasks\n  include_tasks:\n    file: client_cluster_ssh.yaml\n  when: client_cluster_ssh_setup\n\n- name: Include Ingress-nginx tasks\n  include_tasks:\n    file: ingress_nginx.yaml\n  when:\n    - ingress_setup\n    - ingress_implementation == \"nginx\"\n    - inventory_hostname in (groups['primary'] | default([]))\n\n- name: Include HAProxy ingress tasks\n  include_tasks:\n    file: ingress_haproxy.yaml\n  when:\n    - ingress_setup\n    - ingress_implementation == \"haproxy\"\n    - inventory_hostname in (groups['primary'] | default([]))\n\n- name: Include Envoy Gateway tasks\n  include_tasks:\n    file: gatewayapi_envoy.yaml\n  when:\n    - gatewayapi_setup\n    - gatewayapi_implementation == \"envoy\"\n    - inventory_hostname in (groups['primary'] | default([]))\n\n- name: Include public endpoints tasks\n  include_tasks:\n    file: public_endpoints.yaml\n  when:\n    - 
ingress_setup or gatewayapi_setup\n    - metallb_setup\n\n- name: Include env inventory tasks\n  include_tasks:\n    file: env_inventory.yaml\n  when:\n    - inventory_hostname in (groups['primary'] | default([]))\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/metallb.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Deploy MetalLB\n  become: false\n  when: inventory_hostname in (groups['primary'] | default([]))\n  block:\n    - name: Add MetalLB chart repo\n      become_user: \"{{ kubectl.user }}\"\n      shell: |\n        helm repo add metallb https://metallb.github.io/metallb\n\n    - name: Install MetalLB\n      become_user: \"{{ kubectl.user }}\"\n      shell: |\n        helm upgrade --install metallb metallb/metallb \\\n          --version {{ metallb_version }} \\\n          --namespace metallb-system \\\n          --create-namespace \\\n          --wait --timeout 5m\n\n    - name: Wait for MetalLB webhook to become ready\n      command: kubectl -n metallb-system wait --timeout=120s --for=condition=Ready pods -l 'app.kubernetes.io/component=controller'\n\n    - name: Create MetalLB address pool\n      shell: |\n        tee > /tmp/metallb_ipaddresspool.yaml <<EOF\n        ---\n        apiVersion: metallb.io/v1beta1\n        kind: IPAddressPool\n        metadata:\n          name: public\n          namespace: metallb-system\n        spec:\n          addresses:\n            - \"{{ metallb_pool_cidr }}\"\n        EOF\n        kubectl apply -f /tmp/metallb_ipaddresspool.yaml\n\n        tee > /tmp/metallb_l2advertisement.yaml <<EOF\n        ---\n        apiVersion: metallb.io/v1beta1\n        kind: L2Advertisement\n        metadata:\n          name: public\n          namespace: metallb-system\n       
 spec:\n          ipAddressPools:\n            - public\n        EOF\n        kubectl apply -f /tmp/metallb_l2advertisement.yaml\n      retries: 6\n      delay: 10\n      register: metallb_pool_result\n      until: metallb_pool_result.rc == 0\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/overlay.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Create vxlan bridge\n  shell: |\n    ip link add name {{ overlay_network_bridge_name }} type bridge\n    ip link set dev {{ overlay_network_bridge_name }} up\n    ip addr add {{ overlay_network_bridge_ip }}/24 dev {{ overlay_network_bridge_name }}\n  args:\n    creates: \"/sys/class/net/{{ overlay_network_bridge_name }}\"\n\n- name: Create vxlan interface\n  shell: |\n    ip link add {{ overlay_network_vxlan_iface }} \\\n      type vxlan \\\n      id {{ overlay_network_vxlan_id }} \\\n      dev {{ overlay_network_underlay_dev }} \\\n      dstport {{ overlay_network_vxlan_port }} \\\n      local {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}\n    ip link set {{ overlay_network_vxlan_iface }} up\n    ip link set {{ overlay_network_vxlan_iface }} master {{ overlay_network_bridge_name }}\n  args:\n    creates: \"/sys/class/net/{{ overlay_network_vxlan_iface }}\"\n\n- name: Populate FDB\n  shell: |\n    bridge fdb append 00:00:00:00:00:00 \\\n      dev {{ overlay_network_vxlan_iface }} \\\n      dst {{ hostvars[item]['ansible_host'] }}\n  loop: \"{{ groups['all'] | sort }}\"\n  when: item != inventory_hostname\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/prerequisites.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Ensure APT keyrings directory exists\n  file:\n    path: /etc/apt/keyrings\n    state: directory\n    mode: \"0755\"\n\n- name: Add Kubernetes apt repository key\n  get_url:\n    url: \"https://pkgs.k8s.io/core:/stable:/{{ kube_version_repo }}/deb/Release.key\"\n    dest: /etc/apt/keyrings/kubernetes.asc\n    mode: \"0644\"\n\n- name: Add Kubernetes apt repository\n  apt_repository:\n    repo: \"deb [signed-by=/etc/apt/keyrings/kubernetes.asc] https://pkgs.k8s.io/core:/stable:/{{ kube_version_repo }}/deb/ /\"\n    state: present\n    filename: kubernetes\n\n- name: Install necessary packages\n  apt:\n    pkg:\n      - apt-transport-https\n      - bc\n      - bridge-utils\n      - ca-certificates\n      - conntrack\n      - curl\n      - ethtool\n      - git\n      - git-review\n      - gnupg2\n      - htop\n      - iptables\n      - ipvsadm\n      - jq\n      - less\n      - libffi-dev\n      - lvm2\n      - make\n      - net-tools\n      - nfs-common\n      - nmap\n      - notary\n      - python3-dev\n      - rbd-nbd\n      - socat\n      - tcpdump\n      - telnet\n      # needed for kubernetes-node-problem-detector chart\n      # which mounts /etc/localtime from the host\n      - tzdata\n      - util-linux\n      - uuid-runtime\n      - vim\n      - wireguard\n...\n"
  },
  {
    "path": "roles/deploy-env/tasks/public_endpoints.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Create openstack ingress service\n  when:\n    - ingress_setup\n    - ingress_openstack_setup\n    - inventory_hostname in (groups['primary'] | default([]))\n  become: false\n  shell: |\n    tee > /tmp/ingress_openstack_endpoint_service.yaml <<EOF\n    ---\n    apiVersion: v1\n    kind: Namespace\n    metadata:\n      labels:\n        kubernetes.io/metadata.name: openstack\n        name: openstack\n      name: openstack\n    ---\n    kind: Service\n    apiVersion: v1\n    metadata:\n      name: public-openstack\n      namespace: openstack\n      annotations:\n        metallb.universe.tf/loadBalancerIPs: \"{{ metallb_ingress_openstack_endpoint_cidr | ipaddr('address') }}\"\n    spec:\n      externalTrafficPolicy: Cluster\n      type: LoadBalancer\n      selector:\n        app: ingress-api\n      ports:\n        - name: http\n          port: 80\n        - name: https\n          port: 443\n    EOF\n    kubectl apply -f /tmp/ingress_openstack_endpoint_service.yaml\n\n- name: Create osh-infra ingress service\n  when:\n    - ingress_setup\n    - ingress_osh_infra_setup\n    - inventory_hostname in (groups['primary'] | default([]))\n  become: false\n  shell: |\n    tee > /tmp/ingress_osh_infra_endpoint_service.yaml <<EOF\n    ---\n    apiVersion: v1\n    kind: Namespace\n    metadata:\n      labels:\n        kubernetes.io/metadata.name: osh-infra\n        name: osh-infra\n      name: 
osh-infra\n    ---\n    kind: Service\n    apiVersion: v1\n    metadata:\n      name: public-osh-infra\n      namespace: osh-infra\n      annotations:\n        metallb.universe.tf/loadBalancerIPs: \"{{ metallb_ingress_osh_infra_endpoint_cidr | ipaddr('address') }}\"\n    spec:\n      externalTrafficPolicy: Cluster\n      type: LoadBalancer\n      selector:\n        app: ingress-api\n      ports:\n        - name: http\n          port: 80\n        - name: https\n          port: 443\n    EOF\n    kubectl apply -f /tmp/ingress_osh_infra_endpoint_service.yaml\n\n- name: Create Gateway for OpenStack services\n  when:\n    - gatewayapi_setup\n    - gatewayapi_implementation == \"envoy\"\n    - metallb_setup\n    - inventory_hostname in (groups['primary'] | default([]))\n  become: false\n  shell: |\n    tee > /tmp/gatewayapi_envoy_default.yaml <<EOF\n    ---\n    apiVersion: gateway.envoyproxy.io/v1alpha1\n    kind: EnvoyProxy\n    metadata:\n      name: gateway-proxy-default\n      namespace: envoy-gateway-system\n    spec:\n      provider:\n        type: Kubernetes\n        kubernetes:\n          envoyService:\n            type: LoadBalancer\n            externalTrafficPolicy: Cluster\n            annotations:\n              metallb.universe.tf/loadBalancerIPs: \"{{ metallb_gatewayapi_endpoint_cidr | ipaddr('address') }}\"\n            patch:\n              type: StrategicMerge\n              value:\n                spec:\n                  externalTrafficPolicy: Cluster\n    ---\n    apiVersion: gateway.networking.k8s.io/v1\n    kind: Gateway\n    metadata:\n      name: gateway-default\n      namespace: envoy-gateway-system\n    spec:\n      gatewayClassName: default\n      infrastructure:\n        parametersRef:\n          group: gateway.envoyproxy.io\n          kind: EnvoyProxy\n          name: gateway-proxy-default\n      listeners:\n        - name: http\n          protocol: HTTP\n          port: 80\n          allowedRoutes:\n            namespaces:\n              
from: All\n    EOF\n    kubectl apply -f /tmp/gatewayapi_envoy_default.yaml\n\n- name: Set dnsmasq listen ip\n  set_fact:\n    nameserver_ip: >-\n      {{\n        overlay_network_prefix ~ ((groups['all'] | sort).index(groups['primary'][0]) + 1) if overlay_network_setup | bool\n        else (groups['primary'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | first)\n      }}\n\n- debug:\n    msg: \"nameserver_ip = {{ nameserver_ip }}\"\n\n- name: Start dnsmasq\n  when:\n    - inventory_hostname in (groups['primary'] | default([]))\n  docker_container:\n    name: endpoint_dnsmasq\n    image: \"{{ dnsmasq_image }}\"\n    network_mode: host\n    capabilities:\n      - NET_ADMIN\n    entrypoint: dnsmasq\n    command: >-\n      --keep-in-foreground\n      --no-hosts\n      --bind-interfaces\n      --address=\"/openstack-helm.org/{{ metallb_gatewayapi_endpoint_cidr | ipaddr('address') }}\"\n      --listen-address=\"{{ nameserver_ip }}\"\n      --no-resolv\n      --server={{ dnsmasq_dns_server }}\n      {{ dnsmasq_extra_args | default('') }}\n    state: started\n    recreate: yes\n\n- name: Configure /etc/resolv.conf\n  template:\n    src: files/resolv.conf\n    dest: /etc/resolv.conf\n    owner: root\n    group: root\n    mode: 0644\n  vars:\n    nameserver_ip: \"{{ nameserver_ip }}\"\n\n- name: Restart coredns to re-read resolv.conf changes\n  become: false\n  shell: |\n    kubectl rollout restart -n kube-system deployment/coredns\n    kubectl rollout status -n kube-system deployment/coredns --timeout=120s\n  when: inventory_hostname in (groups['primary'] | default([]))\n...\n"
  },
  {
    "path": "roles/deploy-jq/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- block:\n    - name: ensuring jq is deployed on host\n      when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' or ansible_distribution == 'Fedora'\n      include_role:\n        name: deploy-package\n        tasks_from: dist\n      vars:\n        packages:\n          deb:\n            - jq\n          rpm:\n            - jq\n\n    - name: installing jq 1.5 binary for centos\n      become: true\n      become_user: root\n      when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux'\n      get_url:\n        url: https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64\n        dest: /usr/bin/jq\n        mode: 365\n        force: yes\n...\n"
  },
  {
    "path": "roles/deploy-package/defaults/main.yml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nproxy:\n  http: null\n  https: null\n  noproxy: null\n...\n"
  },
  {
    "path": "roles/deploy-package/tasks/dist.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: managing distro packages for ubuntu\n  become: true\n  become_user: root\n  when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'\n  vars:\n    state: present\n  apt:\n    name: \"{{ item }}\"\n    state: \"{{ state }}\"\n  with_items: \"{{ packages.deb }}\"\n\n- name: managing distro packages for centos\n  become: true\n  become_user: root\n  when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux'\n  vars:\n    state: present\n  yum:\n    name: \"{{ item }}\"\n    state: \"{{ state }}\"\n  with_items: \"{{ packages.rpm }}\"\n\n- name: managing distro packages for fedora\n  become: true\n  become_user: root\n  when: ansible_distribution == 'Fedora'\n  vars:\n    state: present\n  dnf:\n    name: \"{{ item }}\"\n    state: \"{{ state }}\"\n  with_items: \"{{ packages.rpm }}\"\n...\n"
  },
  {
    "path": "roles/deploy-package/tasks/pip.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: managing pip packages\n  become: true\n  become_user: root\n  environment:\n    http_proxy: \"{{ proxy.http }}\"\n    https_proxy: \"{{ proxy.https }}\"\n    no_proxy: \"{{ proxy.noproxy }}\"\n  vars:\n    state: present\n  pip:\n    name: \"{{ item }}\"\n    state: \"{{ state }}\"\n  with_items: \"{{ packages }}\"\n...\n"
  },
  {
    "path": "roles/deploy-python/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: ensuring python3 is present on all hosts\n  raw: test -e /usr/bin/python3 || (sudo apt -y update && sudo apt install -y python3-minimal) || (sudo yum install -y python3) || (sudo dnf install -y python3)\n...\n"
  },
  {
    "path": "roles/deploy-python-pip/defaults/main.yml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nproxy:\n  http: null\n  https: null\n  noproxy: null\n...\n"
  },
  {
    "path": "roles/deploy-python-pip/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: check if pip installed\n  command: pip3 --version\n  register: pip_version_output\n  ignore_errors: yes\n  changed_when: false\n\n- name: ensuring python pip package is present for ubuntu\n  when: ( pip_version_output is failed ) and ( ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' )\n  apt:\n    name: python3-pip\n    state: present\n\n- name: ensuring python pip package is present for centos\n  when: ( pip_version_output is failed ) and ( ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' )\n  block:\n    - name: ensuring epel-release package is present for centos as python3-pip is in the epel repo\n      yum:\n        name: epel-release\n        state: present\n    - name: ensuring python pip package is present for centos\n      yum:\n        name: python3-pip\n        state: present\n\n- name: ensuring python pip package is present for fedora via the python3-pip rpm\n  when: ( pip_version_output is failed ) and ( ansible_distribution == 'Fedora' )\n  dnf:\n    name: python3-pip\n    state: present\n\n- name: ensuring pip is the latest version\n  become: true\n  become_user: root\n  environment:\n    http_proxy: \"{{ proxy.http }}\"\n    https_proxy: \"{{ proxy.https }}\"\n    no_proxy: \"{{ proxy.noproxy }}\"\n  pip:\n    name: pip\n    state: latest\n    executable: pip3\n...\n"
  },
  {
    "path": "roles/deploy-selenium/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Create selenium configuration directory\n  file:\n    path: /etc/selenium\n    state: directory\n\n- name: Install selenium and dependencies dependencies\n  when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'\n  apt:\n    name: \"{{ packages }}\"\n  vars:\n    packages:\n      - unzip\n      - wget\n      - xvfb\n      - jq\n      - python3-selenium\n\n- name: Add google chrome signing key\n  get_url:\n    url: https://dl-ssl.google.com/linux/linux_signing_key.pub\n    dest: /etc/apt/trusted.gpg.d/google-chrome.asc\n    timeout: 120\n  retries: 10\n  delay: 5\n\n- name: Add google chrome repository\n  apt_repository:\n    repo: \"deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/google-chrome.asc] http://dl.google.com/linux/chrome/deb/ stable main\"\n    filename: google-chrome\n    state: present\n\n- name: Update apt acquire config\n  shell: |\n    tee /etc/apt/apt.conf.d/99retries-timeouts <<EOF\n    Acquire::Retries \"10\";\n    Acquire::https::Timeout \"120\";\n    Acquire::http::Timeout \"120\";\n    Acquire::ForceIPv4 \"true\";\n    EOF\n  args:\n    creates: /etc/apt/apt.conf.d/99retries-timeouts\n    executable: /bin/bash\n\n- name: Install google chrome\n  when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'\n  apt:\n    name: google-chrome-stable\n    update_cache: yes\n    install_recommends: false\n\n# We need to install 
ChromeDriver compatible with Google Chrome version\n- name: Get selenium chromedriver archive\n  shell: |-\n    set -ex\n    CHROME_VERSION=$(dpkg -s google-chrome-stable | grep -Po '(?<=^Version: ).*' | awk -F'.' '{print $1\".\"$2\".\"$3}')\n    DRIVER_URL=$(wget -qO- https://googlechromelabs.github.io/chrome-for-testing/known-good-versions-with-downloads.json | jq -r --arg chrome_version \"$CHROME_VERSION\" '.versions[] | select(.version | test($chrome_version)) | .downloads.chromedriver[] | select(.platform==\"linux64\").url' | tail -1)\n    wget -O /tmp/chromedriver.zip ${DRIVER_URL}\n  args:\n    executable: /bin/bash\n\n- name: Unarchive selenium chromedriver\n  unarchive:\n    src: /tmp/chromedriver.zip\n    dest: /etc/selenium\n    extra_opts: [\"-j\"]\n    include: [\"*/chromedriver\"]\n    remote_src: yes\n...\n"
  },
  {
    "path": "roles/describe-kubernetes-objects/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: \"creating directory for cluster scoped objects\"\n  file:\n    path: \"{{ logs_dir }}/objects/cluster\"\n    state: directory\n\n- name: \"Gathering descriptions for cluster scoped objects\"\n  shell: |-\n          set -e\n          export OBJECT_TYPE=node,clusterrole,clusterrolebinding,storageclass,namespace\n          export PARALLELISM_FACTOR=2\n\n          function list_objects () {\n            printf ${OBJECT_TYPE} | xargs -d ',' -I {} -P1 -n1 bash -c 'echo \"$@\"' _ {}\n          }\n          export -f list_objects\n\n          function name_objects () {\n            export OBJECT=$1\n            kubectl get ${OBJECT} -o name | xargs -L1 -I {} -P1 -n1 bash -c 'echo \"${OBJECT} ${1#*/}\"' _ {}\n          }\n          export -f name_objects\n\n          function get_objects () {\n            input=($1)\n            export OBJECT=${input[0]}\n            export NAME=${input[1]#*/}\n            echo \"${OBJECT}/${NAME}\"\n            DIR=\"{{ logs_dir }}/objects/cluster/${OBJECT}\"\n            mkdir -p ${DIR}\n            kubectl get ${OBJECT} ${NAME} -o yaml > \"${DIR}/${NAME}.yaml\"\n            kubectl describe ${OBJECT} ${NAME} > \"${DIR}/${NAME}.txt\"\n          }\n          export -f get_objects\n\n          list_objects | \\\n            xargs -r -n 1 -P ${PARALLELISM_FACTOR} -I {} bash -c 'name_objects \"$@\"' _ {} | \\\n            xargs -r -n 1 -P ${PARALLELISM_FACTOR} -I 
{} bash -c 'get_objects \"$@\"' _ {}\n  args:\n    executable: /bin/bash\n  ignore_errors: True\n\n- name: \"creating directory for namespace scoped objects\"\n  file:\n    path: \"{{ logs_dir }}/objects/namespaced\"\n    state: directory\n\n- name: \"Gathering descriptions for namespace scoped objects\"\n  shell: |-\n          set -e\n          export OBJECT_TYPE=configmaps,cronjobs,daemonsets,deployment,endpoints,ingresses,jobs,networkpolicies,pods,podsecuritypolicies,persistentvolumeclaims,rolebindings,roles,secrets,serviceaccounts,services,statefulsets\n          export PARALLELISM_FACTOR=2\n          function get_namespaces () {\n            kubectl get namespaces -o name | awk -F '/' '{ print $NF }'\n          }\n\n          function list_namespaced_objects () {\n            export NAMESPACE=$1\n            printf ${OBJECT_TYPE} | xargs -d ',' -I {} -P1 -n1 bash -c 'echo \"${NAMESPACE} $@\"' _ {}\n          }\n          export -f list_namespaced_objects\n\n          function name_objects () {\n            input=($1)\n            export NAMESPACE=${input[0]}\n            export OBJECT=${input[1]}\n            kubectl get -n ${NAMESPACE} ${OBJECT} -o name | xargs -L1 -I {} -P1 -n1 bash -c 'echo \"${NAMESPACE} ${OBJECT} $@\"' _ {}\n          }\n          export -f name_objects\n\n          function get_objects () {\n            input=($1)\n            export NAMESPACE=${input[0]}\n            export OBJECT=${input[1]}\n            export NAME=${input[2]#*/}\n            echo \"${NAMESPACE}/${OBJECT}/${NAME}\"\n            DIR=\"{{ logs_dir }}/objects/namespaced/${NAMESPACE}/${OBJECT}\"\n            mkdir -p ${DIR}\n            kubectl get -n ${NAMESPACE} ${OBJECT} ${NAME} -o yaml > \"${DIR}/${NAME}.yaml\"\n            kubectl describe -n ${NAMESPACE} ${OBJECT} ${NAME} > \"${DIR}/${NAME}.txt\"\n          }\n          export -f get_objects\n\n          get_namespaces | \\\n            xargs -r -n 1 -P ${PARALLELISM_FACTOR} -I {} bash -c 'list_namespaced_objects 
\"$@\"' _ {} | \\\n            xargs -r -n 1 -P ${PARALLELISM_FACTOR} -I {} bash -c 'name_objects \"$@\"' _ {} | \\\n            xargs -r -n 1 -P ${PARALLELISM_FACTOR} -I {} bash -c 'get_objects \"$@\"' _ {}\n  args:\n    executable: /bin/bash\n  ignore_errors: True\n\n- name: \"Downloads logs to executor\"\n  synchronize:\n    src: \"{{ logs_dir }}/objects\"\n    dest: \"{{ zuul.executor.log_root }}/{{ inventory_hostname }}\"\n    mode: pull\n  ignore_errors: yes\n...\n"
  },
  {
    "path": "roles/disable-local-nameserver/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# NOTE(portdirect): We disable the local nameserver as it interferes with the\n# k8s dns-service and other local resolvers used for development use.\n# See the following for the original config:\n# * https://github.com/openstack/project-config/blob/0332c33dd134033e0620645c252f82b77e4c16f5/nodepool/elements/nodepool-base/finalise.d/89-unbound\n\n---\n- name: Disable local nameserver and systemd-resolved service\n  when: ansible_distribution == 'Ubuntu'\n  block:\n    - name: update rc.local\n      blockinfile:\n        path: /etc/rc.local\n        mode: 365\n        block: |\n          #!/bin/bash\n          set -o xtrace\n          # Some providers inject dynamic network config statically. Work around this\n          # for DNS nameservers. 
This is expected to fail on some nodes so remove -e.\n          set +e\n          sed -i -e 's/^\\(DNS[0-9]*=[.0-9]\\+\\)/#\\1/g' /etc/sysconfig/network-scripts/ifcfg-*\n          sed -i -e 's/^NETCONFIG_DNS_POLICY=.*/NETCONFIG_DNS_POLICY=\"\"/g' /etc/sysconfig/network/config\n          set -e\n          echo 'nameserver 208.67.222.222' > /etc/resolv.conf\n          echo 'nameserver 8.8.8.8' >> /etc/resolv.conf\n          exit 0\n    - name: write resolv.conf\n      blockinfile:\n        path: /etc/resolv.conf\n        mode: 644\n        block: |\n          nameserver 208.67.222.222\n          nameserver 8.8.8.8\n    - name: stop unbound service\n      systemd:\n        state: stopped\n        enabled: no\n        masked: yes\n        daemon_reload: yes\n        name: unbound\n    - name: stop systemd-resolved service\n      systemd:\n        state: stopped\n        enabled: no\n        masked: yes\n        daemon_reload: yes\n        name: systemd-resolved\n...\n"
  },
  {
    "path": "roles/enable-hugepages/defaults/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nhugepages:\n  enabled: false\n# This parameter sets the size of the huge pages, available options: 2M and 1G\n  size: \"2M\"\n# This parameter sets the number of huge pages to allocate\n  number: 1024\ngrub_default_config: \"/etc/default/grub\"\n...\n"
  },
  {
    "path": "roles/enable-hugepages/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Set up 1G hugepages\n  become: true\n  block:\n    - name: Configure grub\n      lineinfile:\n        dest: \"{{ grub_default_config }}\"\n        line: 'GRUB_CMDLINE_LINUX=\"default_hugepagesz={{ hugepages.size }} hugepagesz={{ hugepages.size }} hugepages={{ hugepages.number }}\"'\n        regexp: '^GRUB_CMDLINE_LINUX=\"'\n    - name: Update grub configuration\n      command: update-grub2\n    - name: Reboot host\n      reboot:\n        reboot_timeout: 600\n  when: hugepages.size == \"1G\"\n\n- name: Set up 2M hugepages\n  become: true\n  sysctl:\n    name: vm.nr_hugepages\n    value: \"{{ hugepages.number }}\"\n    sysctl_set: true\n    reload: true\n  when: hugepages.size == \"2M\"\n...\n"
  },
  {
    "path": "roles/ensure-chart-testing/README.rst",
    "content": "Ensure chart-testing is installed\n\n**Role Variables**\n\n.. zuul:rolevar:: chart_testing_version\n\n   Version of chart-testing to install.\n\n.. zuul:rolevar:: ensure_chart_testing_repo_name_helm_chart\n   :default: https://github.com/helm/chart-testing/releases/download\n\n   The root location to get the chart testing helm chart.\n\n.. zuul:rolevar:: ensure_chart_testing_repo_name_config\n   :default: https://raw.githubusercontent.com/helm/chart-testing\n\n   The root location to get the chart testing configuration files.\n"
  },
  {
    "path": "roles/ensure-chart-testing/defaults/main.yaml",
    "content": "---\nchart_testing_version: 2.4.0\nensure_chart_testing_repo_name_helm_chart: \"https://github.com/helm/chart-testing/releases/download\"\nensure_chart_testing_repo_name_config: \"https://raw.githubusercontent.com/helm/chart-testing\"\nvirtualenv: \"{{ ansible_user_dir }}/venv\""
  },
  {
    "path": "roles/ensure-chart-testing/tasks/main.yaml",
    "content": "---\n- name: Install pip\n  include_role:\n    name: ensure-pip\n\n- name: Install Python dependencies\n  become: false\n  pip:\n    name:\n    - yamale\n    - yamllint\n    virtualenv: \"{{ virtualenv }}\"\n    virtualenv_command: python3 -m venv\n\n- name: Install chart-testing\n  become: true\n  unarchive:\n    remote_src: true\n    src: \"{{ ensure_chart_testing_repo_name_helm_chart }}/v{{ chart_testing_version }}/chart-testing_{{ chart_testing_version }}_linux_amd64.tar.gz\"\n    dest: /usr/local/bin\n\n- name: Setup /etc/ct\n  become: true\n  file:\n    path: /etc/ct\n    state: directory\n    mode: 0755\n\n- name: Install configuration files\n  become: true\n  get_url:\n    url: \"{{ ensure_chart_testing_repo_name_config }}/v{{ chart_testing_version }}/etc/{{ zj_item }}\"\n    dest: \"/etc/ct/{{ zj_item }}\"\n  loop:\n  - chart_schema.yaml\n  - lintconf.yaml\n  loop_control:\n    loop_var: zj_item\n"
  },
  {
    "path": "roles/gather-host-logs/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: \"creating directory for system status\"\n  file:\n    path: \"{{ logs_dir }}/system\"\n    state: directory\n\n- name: \"Get logs for each host\"\n  become: yes\n  shell: |-\n          set -x\n          systemd-cgls --full --all --no-pager > {{ logs_dir }}/system/systemd-cgls.txt\n          ip addr > {{ logs_dir }}/system/ip-addr.txt\n          ip route > {{ logs_dir }}/system/ip-route.txt\n          lsblk > {{ logs_dir }}/system/lsblk.txt\n          mount > {{ logs_dir }}/system/mount.txt\n          docker images > {{ logs_dir }}/system/docker-images.txt\n          brctl show > {{ logs_dir }}/system/brctl-show.txt\n          ps aux --sort=-%mem > {{ logs_dir }}/system/ps.txt\n          dpkg -l > {{ logs_dir }}/system/packages.txt\n          CONTAINERS=($(docker ps -a --format {% raw %}'{{ .Names }}'{% endraw %} --filter label=zuul))\n          if [ ! -z \"$CONTAINERS\" ]; then\n            mkdir -p \"{{ logs_dir }}/system/containers\"\n            for CONTAINER in ${CONTAINERS}; do\n              docker logs \"${CONTAINER}\" > \"{{ logs_dir }}/system/containers/${CONTAINER}.txt\"\n            done\n          fi\n  args:\n    executable: /bin/bash\n  ignore_errors: True\n\n- name: \"Downloads logs to executor\"\n  synchronize:\n    src: \"{{ logs_dir }}/system\"\n    dest: \"{{ zuul.executor.log_root }}/{{ inventory_hostname }}\"\n    mode: pull\n  ignore_errors: True\n...\n"
  },
  {
    "path": "roles/gather-pod-logs/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: \"creating directory for pod logs\"\n  file:\n    path: \"{{ logs_dir }}/pod-logs\"\n    state: directory\n\n- name: \"creating directory for failed pod logs\"\n  file:\n    path: \"{{ logs_dir }}/pod-logs/failed-pods\"\n    state: directory\n\n- name: \"retrieve all kubernetes logs, current and previous (if they exist)\"\n  shell: |-\n          set -e\n          function get_namespaces () {\n            kubectl get namespaces -o name | awk -F '/' '{ print $NF }'\n          }\n          function get_pods () {\n            NAMESPACE=$1\n            kubectl get pods -n ${NAMESPACE} -o name | awk -F '/' '{ print $NF }' | xargs -I {} echo ${NAMESPACE} {}\n          }\n          export -f get_pods\n          function get_pod_logs () {\n            NAMESPACE=${1% *}\n            POD=${1#* }\n            INIT_CONTAINERS=$(kubectl get pod $POD -n ${NAMESPACE} -o json | jq -r '.spec.initContainers[]?.name')\n            CONTAINERS=$(kubectl get pod $POD -n ${NAMESPACE} -o json | jq -r '.spec.containers[].name')\n            for CONTAINER in ${INIT_CONTAINERS} ${CONTAINERS}; do\n              echo \"${NAMESPACE}/${POD}/${CONTAINER}\"\n              mkdir -p \"{{ logs_dir }}/pod-logs/${NAMESPACE}/${POD}\"\n              mkdir -p \"{{ logs_dir }}/pod-logs/failed-pods/${NAMESPACE}/${POD}\"\n              kubectl logs ${POD} -n ${NAMESPACE} -c ${CONTAINER} > \"{{ logs_dir 
}}/pod-logs/${NAMESPACE}/${POD}/${CONTAINER}.txt\"\n              kubectl logs --previous ${POD} -n ${NAMESPACE} -c ${CONTAINER} > \"{{ logs_dir }}/pod-logs/failed-pods/${NAMESPACE}/${POD}/${CONTAINER}.txt\"\n            done\n          }\n          export -f get_pod_logs\n          get_namespaces | \\\n            xargs -r -I {} bash -c 'get_pods \"$@\"' _ {} | \\\n            xargs -r -I {} bash -c 'get_pod_logs \"$@\"' _ {}\n  args:\n    executable: /bin/bash\n  ignore_errors: True\n\n- name: \"Downloads pod logs to executor\"\n  synchronize:\n    src: \"{{ logs_dir }}/pod-logs\"\n    dest: \"{{ zuul.executor.log_root }}/{{ inventory_hostname }}\"\n    mode: pull\n  ignore_errors: True\n...\n"
  },
  {
    "path": "roles/gather-prom-metrics/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: \"creating directory for helm release descriptions\"\n  file:\n    path: \"{{ logs_dir }}/prometheus\"\n    state: directory\n\n- name: \"Get metrics from exporter services in all namespaces\"\n  shell: |-\n          set -e\n          NAMESPACES=$(kubectl get namespaces -o json | jq -r '.items[].metadata.name')\n          for NS in $NAMESPACES; do\n            SERVICES=$(kubectl get svc -n $NS -o json | jq -r '.items[] | select(.spec.ports[].name==\"metrics\") | .metadata.name')\n            for SVC in $SERVICES; do\n              PORT=$(kubectl get svc $SVC -n $NS -o json | jq -r '.spec.ports[] | select(.name==\"metrics\") | .port')\n              echo \"Scraping $SVC.$NS:$PORT/metrics:\"\n              curl \"$SVC.$NS:$PORT/metrics\" >> \"{{ logs_dir }}\"/prometheus/$NS-$SVC.txt || true\n            done\n          done\n  args:\n    executable: /bin/bash\n  ignore_errors: True\n\n- name: \"Get ceph metrics from ceph-mgr\"\n  shell: |-\n          set -e\n          mgr_endpoints=$(kubectl get endpoints -n ceph -l component=manager -o json | jq -r '.items[].subsets[].addresses[].ip')\n          echo \"ceph-mgr endpoints: $mgr_endpoints\"\n          for endpoint in $mgr_endpoints; do\n            echo \"checking ceph-mgr at $endpoint\"\n            metrics_curl=\"curl $endpoint:9283/metrics\"\n            op=$(eval \"$metrics_curl\")\n            if [[ -n $op ]]; then\n              curl 
$endpoint:9283/metrics >> \"{{ logs_dir }}\"/prometheus/ceph-ceph-mgr.txt\n              break\n            else\n              echo \"$endpoint is a standby ceph-mgr. Trying next endpoint\"\n            fi\n          done\n  args:\n    executable: /bin/bash\n  ignore_errors: True\n\n- name: \"Get metrics from fluentd pods\"\n  shell: |-\n          set -e\n          NAMESPACE=\"osh-infra\"\n          APP_LABEL=\"fluentd\"\n          PODS=$(kubectl get pods -n $NAMESPACE -l application=$APP_LABEL -o json | jq -r '.items[].metadata.name')\n          for POD in $PODS; do\n            IP=$(kubectl get pod -n $NAMESPACE $POD -o json | jq -r '.status.podIP')\n            PORT=$(kubectl get pod -n $NAMESPACE $POD -o json |  jq -r '.spec.containers[0].ports[] | select(.name==\"metrics\") | .containerPort')\n            echo \"Scraping $POD at $IP:$PORT/metrics\"\n            curl \"$IP:$PORT/metrics\" >> \"{{ logs_dir }}\"/prometheus/$POD.txt || true\n          done\n  args:\n    executable: /bin/bash\n  ignore_errors: True\n\n- name: \"Downloads logs to executor\"\n  synchronize:\n    src: \"{{ logs_dir }}/prometheus\"\n    dest: \"{{ zuul.executor.log_root }}/{{ inventory_hostname }}\"\n    mode: pull\n  ignore_errors: True\n...\n"
  },
  {
    "path": "roles/gather-selenium-data/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: \"creating directory for helm release descriptions\"\n  file:\n    path: \"{{ logs_dir }}/selenium\"\n    state: directory\n\n- name: \"Get selenium data\"\n  shell: |-\n          set -x\n          cp /tmp/artifacts/* {{ logs_dir }}/selenium/.\n  args:\n    executable: /bin/bash\n  ignore_errors: True\n\n- name: \"Downloads logs to executor\"\n  synchronize:\n    src: \"{{ logs_dir }}/selenium\"\n    dest: \"{{ zuul.executor.log_root }}/{{ inventory_hostname }}\"\n    mode: pull\n  ignore_errors: True\n...\n"
  },
  {
    "path": "roles/helm-release-status/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: \"creating directory for helm release status\"\n  file:\n    path: \"{{ logs_dir }}/helm/{{ directory }}\"\n    state: directory\n  loop_control:\n    loop_var: directory\n  with_items:\n    - values\n    - releases\n\n- name: \"Gather get release status for helm charts\"\n  shell: |-\n          set -e\n\n          for namespace in $(kubectl get namespaces --no-headers --output custom-columns=\":metadata.name\"); do\n                # get all Helm releases including pending and failed releases\n                for release in $(helm list --all --short --namespace $namespace); do\n                        # Make respective directories only when a Helm release actually exists in the namespace\n                        # to prevent uploading a bunch of empty directories for namespaces without a Helm release.\n                        mkdir -p {{ logs_dir }}/helm/releases/$namespace\n                        mkdir -p {{ logs_dir }}/helm/values/$namespace\n\n                        helm status $release --namespace $namespace >> {{ logs_dir }}/helm/releases/$namespace/$release.txt\n                        helm get values $release --namespace $namespace --all >> {{ logs_dir }}/helm/values/$namespace/$release.yaml\n                done\n          done\n  args:\n    executable: /bin/bash\n  ignore_errors: True\n\n- name: \"Downloads logs to executor\"\n  synchronize:\n    src: \"{{ logs_dir 
}}/helm\"\n    dest: \"{{ zuul.executor.log_root }}/{{ inventory_hostname }}\"\n    mode: pull\n  ignore_errors: True\n...\n"
  },
  {
    "path": "roles/mount-extra-volume/defaults/main.yml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nextra_volume:\n  size: 80G\n  type: Linux\n  mount_point: /opt/ext_vol\n...\n"
  },
  {
    "path": "roles/mount-extra-volume/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Mount additional {{ extra_volume.size }} volume if available\n  when:\n    - ansible_distribution == 'Ubuntu'\n    - (ansible_mounts|selectattr(\"mount\", \"equalto\", \"/\")|list)[0].size_available < 50000000000\n  block:\n    - name: Mount additional {{ extra_volume.size }} volume if available\n      shell: |\n        set -ex\n        sudo fdisk --list\n        df -h\n        sudo mkdir -p ${EXTRA_VOLUME_MOUNT_POINT}\n        BIG_VOLUME=$(sudo fdisk -l 2>&1 | grep  -E ${EXTRA_VOLUME_SIZE} | grep ${EXTRA_VOLUME_TYPE} | awk '{print $1}')\n        if ! mount | grep \"${BIG_VOLUME}\"\n        then\n          sudo mkfs.ext4 \"${BIG_VOLUME}\"\n          sudo mount \"${BIG_VOLUME}\" ${EXTRA_VOLUME_MOUNT_POINT}\n          df -h\n        fi\n      environment:\n        EXTRA_VOLUME_MOUNT_POINT: \"{{ extra_volume.mount_point }}\"\n        EXTRA_VOLUME_SIZE: \"{{ extra_volume.size }}\"\n        EXTRA_VOLUME_TYPE: \"{{ extra_volume.type }}\"\n...\n"
  },
  {
    "path": "roles/osh-bandit/defaults/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nwork_dir: \"{{ zuul.project.src_dir }}\"\nhelm_version: \"3.18.1\"\nbandit_version: \"1.7.1\"\n...\n"
  },
  {
    "path": "roles/osh-bandit/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Install Helm\n  include_role:\n    name: ensure-helm\n\n- name: Install binary packages\n  become: true\n  apt:\n    name:\n      - jq\n    state: present\n    update_cache: yes\n\n- name: Install yq bandit\n  pip:\n    name:\n      - yq\n      - bandit=={{ bandit_version }}\n      - setuptools\n      - pbr\n    virtualenv: \"{{ virtualenv }}\"\n    virtualenv_command: python3 -m venv\n\n- name: Template out python files\n  shell: |\n    set -xe;\n    source \"{{ virtualenv }}/bin/activate\"\n    make all SKIP_CHANGELOG=1\n    mkdir -p python-files\n    EXCLUDES=\"helm-toolkit doc tests tools logs tmp roles playbooks releasenotes zuul.d python-files\"\n    DIRS=`ls -d */ | cut -f1 -d'/'`\n\n    for EX in $EXCLUDES; do\n      DIRS=`echo $DIRS | sed \"s/\\b$EX\\b//g\"`\n    done\n\n    for DIR in $DIRS; do\n      PYFILES=$(helm template $DIR | yq 'select(.data != null) | .data | to_entries | map(select(.key | test(\".*\\\\.py\"))) | select(length > 0) | values[] | {(.key) : (.value)}' | jq -s add)\n      PYKEYS=$(echo \"$PYFILES\" | jq -r 'select(. 
!= null) | keys[]')\n      for KEY in $PYKEYS; do\n        echo \"$PYFILES\" | jq -r --arg KEY \"$KEY\" '.[$KEY]' > ./python-files/\"$DIR-$KEY\"\n      done\n    done\n  args:\n    chdir: \"{{ work_dir }}\"\n    executable: /bin/bash\n\n- name: Run bandit against python files\n  shell: |\n    source \"{{ virtualenv }}/bin/activate\"\n    bandit -r ./python-files -s B404,B603\n  args:\n    chdir: \"{{ work_dir }}\"\n    executable: /bin/bash\n...\n"
  },
  {
    "path": "roles/osh-run-script/defaults/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nceph_osd_data_device: \"/dev/loop0\"\nkubeadm:\n  pod_network_cidr: \"10.244.0.0/16\"\nosh_params:\n  container_distro_name: ubuntu\n  container_distro_version: jammy\nosh_values_overrides_path: \"../openstack-helm/values_overrides\"\ngate_scripts_relative_path: \"../openstack-helm\"\n...\n"
  },
  {
    "path": "roles/osh-run-script/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: \"Run script {{ workload[0] }}\"\n  shell: |\n    set -xe;\n    env\n    {{ gate_script_path }}\n  vars:\n    gate_script_path: \"{{ workload[0] }}\"\n  args:\n    chdir: \"{{ zuul.project.src_dir }}/{{ gate_scripts_relative_path }}\"\n  environment:\n    CEPH_OSD_DATA_DEVICE: \"{{ ceph_osd_data_device }}\"\n    POD_NETWORK_CIDR: \"{{ kubeadm.pod_network_cidr }}\"\n    zuul_site_mirror_fqdn: \"{{ zuul_site_mirror_fqdn }}\"\n    OSH_EXTRA_HELM_ARGS: \"{{ zuul_osh_extra_helm_args | default('') }}\"\n    OSH_HELM_REPO: \"{{ osh_helm_repo | default('../openstack-helm') }}\"\n    DOWNLOAD_OVERRIDES: \"{{ download_overrides | default('') }}\"\n    OSH_PATH: \"{{ zuul_osh_relative_path | default('../openstack-helm/') }}\"\n    OSH_VALUES_OVERRIDES_PATH: \"{{ osh_values_overrides_path }}\"\n    OPENSTACK_RELEASE: \"{{ osh_params.openstack_release | default('') }}\"\n    CONTAINER_DISTRO_NAME: \"{{ osh_params.container_distro_name | default('') }}\"\n    CONTAINER_DISTRO_VERSION: \"{{ osh_params.container_distro_version | default('') }}\"\n    FEATURES: \"{{ osh_params.feature_gates | default('') | regex_replace(',', ' ')  }} {{ osh_params.openstack_release | default('') }} {{ osh_params.container_distro_name | default('') }}_{{ osh_params.container_distro_version | default('') }} {{ osh_params.container_distro_name | default('') }}\"\n    RUN_HELM_TESTS: \"{{ run_helm_tests | default('yes') 
}}\"\n...\n"
  },
  {
    "path": "roles/osh-run-script-set/defaults/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nceph_osd_data_device: \"/dev/loop0\"\nkubeadm:\n  pod_network_cidr: \"10.244.0.0/16\"\nosh_params:\n  container_distro_name: ubuntu\n  container_distro_version: jammy\nosh_values_overrides_path: \"../openstack-helm/values_overrides\"\ngate_scripts_relative_path: \"../openstack-helm\"\n...\n"
  },
  {
    "path": "roles/osh-run-script-set/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- block:\n    - name: \"Run script set {{ workload }}\"\n      shell: |\n        set -xe;\n        env\n        {{ gate_script_path }}\n      loop: \"{{ workload }}\"\n      loop_control:\n        loop_var: gate_script_path\n        pause: 5\n      args:\n        chdir: \"{{ zuul.project.src_dir }}/{{ gate_scripts_relative_path }}\"\n      environment:\n        CEPH_OSD_DATA_DEVICE: \"{{ ceph_osd_data_device }}\"\n        POD_NETWORK_CIDR: \"{{ kubeadm.pod_network_cidr }}\"\n        zuul_site_mirror_fqdn: \"{{ zuul_site_mirror_fqdn }}\"\n        OSH_EXTRA_HELM_ARGS: \"{{ zuul_osh_extra_helm_args | default('') }}\"\n        OSH_HELM_REPO: \"{{ osh_helm_repo | default('../openstack-helm/') }}\"\n        DOWNLOAD_OVERRIDES: \"{{ download_overrides | default('') }}\"\n        OSH_PATH: \"{{ zuul_osh_relative_path | default('../openstack-helm/') }}\"\n        OSH_VALUES_OVERRIDES_PATH: \"{{ osh_values_overrides_path }}\"\n        OPENSTACK_RELEASE: \"{{ osh_params.openstack_release | default('') }}\"\n        CONTAINER_DISTRO_NAME: \"{{ osh_params.container_distro_name | default('') }}\"\n        CONTAINER_DISTRO_VERSION: \"{{ osh_params.container_distro_version | default('') }}\"\n        FEATURES: \"{{ osh_params.feature_gates | default('') | regex_replace(',', ' ')  }} {{ osh_params.openstack_release | default('') }} {{ osh_params.container_distro_name | default('') }}_{{ 
osh_params.container_distro_version | default('') }} {{ osh_params.container_distro_name | default('') }}\"\n        RUN_HELM_TESTS: \"{{ run_helm_tests | default('yes') }}\"\n      # NOTE(aostapenko) using bigger than async_status timeout due to async_status issue with\n      # not recognizing timed out jobs: https://github.com/ansible/ansible/issues/25637\n      async: 3600\n      poll: 0\n      register: async_results\n\n    - name: Wait for script set to finish\n      async_status:\n        jid: '{{ item.ansible_job_id }}'\n      register: jobs\n      until: jobs.finished\n      delay: 5\n      retries: 360\n      loop: \"{{ async_results.results }}\"\n\n  always:\n    - name: Print script set output\n      shell: |\n          # NOTE(aostapenko) safely retrieving items for the unlikely case if jobs timed out in async_status\n          echo 'STDOUT:\\n{{ item.get(\"stdout\") | regex_replace(\"\\'\", \"\") }}\\nSTDERR:\\n{{ item.get(\"stderr\") | regex_replace(\"\\'\", \"\") }}'\n      loop: \"{{ jobs.results }}\"\n...\n"
  },
  {
    "path": "roles/override-images/defaults/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nwork_dir: \"{{ zuul.project.src_dir }}\"\n...\n"
  },
  {
    "path": "roles/override-images/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Use buildset registry\n  include_role:\n    name: use-buildset-registry\n\n- name: Print zuul\n  debug:\n    var: zuul\n\n- name: Override proposed images from artifacts\n  shell: >\n    find {{ override_paths | join(\" \") }} -type f -exec sed -Ei\n    \"s#['\\\"]?docker\\.io/({{ repo }}):({{ tag }})['\\\"]?\\$#{{ buildset_registry_alias }}:{{ buildset_registry.port }}/\\1:\\2#g\" {} +\n  loop: \"{{ zuul.artifacts | default([]) }}\"\n  args:\n    chdir: \"{{ work_dir }}\"\n  loop_control:\n    loop_var: zj_zuul_artifact\n  when: \"'metadata' in zj_zuul_artifact and zj_zuul_artifact.metadata.type | default('') == 'container_image'\"\n  vars:\n    tag: \"{{ zj_zuul_artifact.metadata.tag }}\"\n    repo: \"{{ zj_zuul_artifact.metadata.repository }}\"\n    override_paths:\n      - ../openstack-helm*/*/values*\n      - ../openstack-helm/tools/deployment/\n\n- name: Diff\n  shell: |\n      set -ex;\n      for dir in openstack-helm; do\n        path=\"{{ work_dir }}/../${dir}/\"\n        if [ ! -d \"${path}\" ]; then continue; fi\n        echo \"${dir} diff\"\n        cd \"${path}\"; git diff; cd -;\n      done\n...\n"
  },
  {
    "path": "roles/setup-firewall/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# NOTE(portdirect): This needs refinement but drops the firewall on zuul nodes\n---\n- name: deploy iptables packages\n  include_role:\n    name: deploy-package\n    tasks_from: dist\n  vars:\n    packages:\n      deb:\n        - iptables\n      rpm:\n        - iptables\n- command: iptables -S\n- command: iptables -F\n- command: iptables -P INPUT ACCEPT\n- command: iptables -S\n...\n"
  },
  {
    "path": "roles/upgrade-host/defaults/main.yml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nubuntu_kernel_hwe: false\n...\n"
  },
  {
    "path": "roles/upgrade-host/tasks/main.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Upgrade to HWE kernel on Ubuntu Hosts\n  when:\n    - ansible_distribution == 'Ubuntu'\n    - ubuntu_kernel_hwe == true\n  block:\n    - name: Deploy HWE kernel on Ubuntu Hosts\n      include_role:\n        name: deploy-package\n        tasks_from: dist\n      vars:\n        packages:\n          deb:\n            - linux-generic-hwe-16.04\n    - name: Reboot Host following kernel upgrade\n      shell: sleep 2 && reboot\n      become: yes\n      async: 30\n      poll: 0\n      ignore_errors: true\n      args:\n        executable: /bin/bash\n    - name: Wait for hosts to come up following reboot\n      wait_for:\n        host: '{{ hostvars[item].ansible_host }}'\n        port: 22\n        state: started\n        delay: 60\n        timeout: 240\n      with_items: '{{ play_hosts }}'\n      connection: local\n...\n"
  },
  {
    "path": "skyline/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Skyline\nname: skyline\nversion: 2025.2.0\nhome: https://docs.openstack.org/skyline-apiserver/latest/\nsources:\n  - https://opendev.org/openstack/skyline-apiserver\n  - https://opendev.org/openstack/skyline-console\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "skyline/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n# FIXME: Add db sync executable endpoint to skyline-apiserver package and use it here\nsite_packages_dir=$(python -c 'import sysconfig; print(sysconfig.get_paths()[\"purelib\"])')\nalembic -c ${site_packages_dir}/skyline_apiserver/db/alembic/alembic.ini upgrade head\n"
  },
  {
    "path": "skyline/templates/bin/_skyline-apiserver-init.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nGENERATOR_ARGS=\"--output-file /etc/nginx/nginx.conf\"\nskyline-nginx-generator ${GENERATOR_ARGS}\n"
  },
  {
    "path": "skyline/templates/bin/_skyline-apiserver.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\ngunicorn -c /etc/skyline/gunicorn.py skyline_apiserver.main:app\n"
  },
  {
    "path": "skyline/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.certificates -}}\n{{ dict \"envAll\" . \"service\" \"skyline\" \"type\" \"internal\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n"
  },
  {
    "path": "skyline/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: skyline-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  skyline-apiserver-init.sh: |\n{{ tuple \"bin/_skyline-apiserver-init.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  skyline-apiserver.sh: |\n{{ tuple \"bin/_skyline-apiserver.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "skyline/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if (.Values.global).subchart_release_name }}\n{{- $_ := set . \"deployment_name\" .Chart.Name }}\n{{- else }}\n{{- $_ := set . \"deployment_name\" .Release.Name }}\n{{- end }}\n\n{{- define \"skyline.configmap.etc\" }}\n{{- $configMapName := index . 0 }}\n{{- $envAll := index . 1 }}\n{{- with $envAll }}\n\n{{- if empty .Values.conf.skyline.openstack.keystone_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.skyline.openstack \"keystone_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.skyline.openstack.default_region -}}\n{{- $_ := set .Values.conf.skyline.openstack \"default_region\" .Values.endpoints.identity.auth.skyline.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.skyline.openstack.system_project -}}\n{{- $_ := set .Values.conf.skyline.openstack \"system_project\" .Values.endpoints.identity.auth.skyline.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.skyline.openstack.system_project_domain -}}\n{{- $_ := set .Values.conf.skyline.openstack \"system_project_domain\" .Values.endpoints.identity.auth.skyline.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.skyline.openstack.system_user_domain -}}\n{{- $_ := set .Values.conf.skyline.openstack \"system_user_domain\" .Values.endpoints.identity.auth.skyline.user_domain_name -}}\n{{- end -}}\n{{- if empty 
.Values.conf.skyline.openstack.system_user_name -}}\n{{- $_ := set .Values.conf.skyline.openstack \"system_user_name\" .Values.endpoints.identity.auth.skyline.username -}}\n{{- end -}}\n{{- if empty .Values.conf.skyline.openstack.system_user_password -}}\n{{- $_ := set .Values.conf.skyline.openstack \"system_user_password\" .Values.endpoints.identity.auth.skyline.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.skyline.default.database_url -}}\n{{- $connection := tuple \"oslo_db\" \"skyline\" \"skyline\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.skyline.default.database_url \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.skyline.default \"database_url\" $connection -}}\n{{- end -}}\n{{- end -}}\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $configMapName }}\ntype: Opaque\ndata:\n  skyline.yaml: {{ .Values.conf.skyline | toYaml | b64enc }}\n  gunicorn.py: {{ .Values.conf.gunicorn | b64enc }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- list \"skyline-etc\" . | include \"skyline.configmap.etc\" }}\n{{- end }}\n"
  },
  {
    "path": "skyline/templates/deployment.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment }}\n{{- $envAll := . }}\n\n{{- $mounts_skyline := .Values.pod.mounts.skyline.skyline }}\n{{- $mounts_skyline_init := .Values.pod.mounts.skyline.init_container }}\n\n{{- $serviceAccountName := \"skyline\" }}\n{{ tuple $envAll \"skyline\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: skyline\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"skyline\" \"skyline\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.skyline }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"skyline\" \"skyline\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"skyline\" \"skyline\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"skyline\" . 
| include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"skyline\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      nodeSelector:\n        {{ .Values.labels.skyline.node_selector_key }}: {{ .Values.labels.skyline.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"skyline\" $mounts_skyline_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: api-init\n{{ tuple $envAll \"skyline\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          command:\n            - /bin/sh\n            - -c\n            - /tmp/skyline-apiserver-init.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: skyline-bin\n              mountPath: /tmp/skyline-apiserver-init.sh\n              subPath: skyline-apiserver-init.sh\n              readOnly: true\n            - name: skyline-etc\n              mountPath: /etc/skyline/skyline.yaml\n              subPath: skyline.yaml\n              readOnly: true\n            - name: skyline-etc\n              mountPath: /etc/skyline/gunicorn.py\n              subPath: gunicorn.py\n              readOnly: true\n            - name: nginx-etc\n              mountPath: /etc/nginx\n{{ if $mounts_skyline_init.volumeMounts }}{{ toYaml $mounts_skyline_init.volumeMounts | indent 12 }}{{ end }}\n      containers:\n        - name: nginx\n          command:\n            - /bin/sh\n            - -c\n            - cp /etc/nginx/.skyline/nginx.conf /etc/nginx/nginx.conf && nginx -g 'daemon off;'\n{{ tuple $envAll \"skyline_nginx\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n          ports:\n            - name: api\n              containerPort: {{ tuple \"skyline\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: nginx-etc\n              mountPath: /etc/nginx/.skyline\n            - name: skyline-var-lib\n              mountPath: /var/lib/skyline\n        - name: skyline\n{{ tuple $envAll \"skyline\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"skyline\" \"container\" \"skyline\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /tmp/skyline-apiserver.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: skyline-var-lib\n              mountPath: /var/lib/skyline\n            - name: skyline-etc\n              mountPath: /etc/skyline/skyline.yaml\n              subPath: skyline.yaml\n              readOnly: true\n            - name: skyline-etc\n              mountPath: /etc/skyline/gunicorn.py\n              subPath: gunicorn.py\n              readOnly: true\n            - name: skyline-bin\n              mountPath: /tmp/skyline-apiserver.sh\n              subPath: skyline-apiserver.sh\n              readOnly: true\n{{ if $mounts_skyline.volumeMounts }}{{ toYaml $mounts_skyline.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: nginx-etc\n          emptyDir: {}\n        - name: skyline-var-lib\n          emptyDir: {}\n        - name: skyline-bin\n          configMap:\n            name: skyline-bin\n            defaultMode: 0555\n        - name: skyline-etc\n          secret:\n            secretName: skyline-etc\n            defaultMode: 0444\n{{ if $mounts_skyline.volumes}}{{ toYaml $mounts_skyline.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "skyline/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "skyline/templates/ingress.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress .Values.network.skyline.ingress.public }}\n{{- $envAll := . -}}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"skyline\" \"backendServiceType\" \"skyline\" \"backendPort\" \"api\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.skyline.skyline.internal -}}\n{{- if and .Values.manifests.certificates $secretName }}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.skyline.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end }}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "skyline/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"skyline\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbDropJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.skyline.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "skyline/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbToInit := dict \"inputType\" \"secret\" \"adminSecret\" .Values.secrets.oslo_db.admin \"userSecret\" .Values.secrets.oslo_db.skyline -}}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"skyline\" \"dbToInit\" $dbToInit -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.skyline.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "skyline/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- define \"skyline.templates.job_db_sync\" -}}\n{{- $envAll := index . 0 }}\n{{- with $envAll }}\n{{- $serviceName := \"skyline\" -}}\n{{- $nodeSelector := dict .Values.labels.job.node_selector_key .Values.labels.job.node_selector_value -}}\n{{- $configMapEtc := (printf \"%s-%s\" $serviceName \"etc\" ) -}}\n{{- $dbAdminTlsSecret := .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- $serviceAccountName := printf \"%s-%s\" $serviceName \"db-sync\" }}\n{{ tuple . \"db_sync\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: skyline-db-sync\n  labels:\n{{ tuple . $serviceName \"db-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n{{ tuple $serviceAccountName . | include \"helm-toolkit.snippets.custom_job_annotations\" | indent 4 -}}\n{{ include \"metadata.annotations.job.db_sync\" . | indent 4 }}\nspec:\n  backoffLimit: 1000\n  template:\n    metadata:\n      labels:\n{{ tuple . $serviceName \"db-sync\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple . 
| include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      {{ tuple . \"db_sync\" | include \"helm-toolkit.snippets.kubernetes_image_pull_secrets\" | indent 6 }}\n      nodeSelector:\n{{ toYaml $nodeSelector | indent 8 }}\n{{- if .Values.pod.tolerations.skyline.enabled }}\n{{ tuple . $serviceName | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end}}\n      initContainers:\n{{ tuple . \"db_sync\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: skyline-db-sync\n          image: {{ .Values.images.tags.skyline_db_sync | quote }}\n          imagePullPolicy: {{ .Values.images.pull_policy | quote }}\n{{ tuple . .Values.pod.resources.jobs.db_sync | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" . \"application\" \"skyline\" \"container\" \"db_sync\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /bin/sh\n            - -c\n            - /tmp/db-sync.sh\n          volumeMounts:\n            - name: skyline-bin\n              mountPath: /tmp/db-sync.sh\n              subPath: db-sync.sh\n              readOnly: true\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: etc-service\n              mountPath: /etc/skyline\n            - name: db-sync-conf\n              mountPath: /etc/skyline/skyline.yaml\n              subPath: skyline.yaml\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" $dbAdminTlsSecret \"path\" \"/etc/mysql/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: etc-service\n          emptyDir: {}\n        - name: skyline-bin\n          configMap:\n         
   name: skyline-bin\n            defaultMode: 0555\n        - name: db-sync-conf\n          secret:\n            secretName: {{ $configMapEtc | quote }}\n            defaultMode: 0444\n{{- dict \"enabled\" $envAll.Values.manifests.certificates \"name\" $dbAdminTlsSecret | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- tuple . | include \"skyline.templates.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "skyline/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"skyline\" -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.skyline.skyline.internal -}}\n{{- end -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.skyline.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "skyline/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"skyline\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "skyline/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"skyline\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "skyline/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "skyline/templates/service-ingress.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress .Values.network.skyline.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"skyline\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "skyline/templates/service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"skyline\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n  - name: api\n    port: {{ tuple \"skyline\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.skyline.node_port.enabled }}\n    nodePort: {{ .Values.network.skyline.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple . \"skyline\" \"skyline\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.skyline.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.skyline.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "skyline/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nrelease_group: null\n\nlabels:\n  skyline:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    skyline_db_sync: quay.io/airshipit/skyline:2025.2-ubuntu_noble\n    skyline: quay.io/airshipit/skyline:2025.2-ubuntu_noble\n    skyline_nginx: quay.io/airshipit/skyline:2025.2-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy'\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nsecrets:\n  identity:\n    admin: skyline-keystone-admin\n    skyline: skyline-keystone-user\n  oslo_db:\n    admin: skyline-db-admin\n    skyline: skyline-db-user\n  tls:\n    skyline:\n      skyline:\n        public: skyline-tls-public\n        internal: skyline-tls-internal\n  oci_image_registry:\n    skyline: skyline-oci-image-registry\n\ntls:\n  identity: false\n  oslo_db: false\n\nnetwork:\n  skyline:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        
haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30779\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      skyline:\n        username: skyline\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /skyline\n    scheme:\n      default: mysql+pymysql\n      skyline: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      skyline:\n        username: skyline\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      skyline:\n        role: admin\n        region_name: RegionOne\n        username: skyline\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  skyline:\n    name: skyline\n    hosts:\n      default: skyline-api\n      public: skyline\n    host_fqdn_override:\n      default: null\n    scheme:\n      default: 'http'\n      service: 'http'\n    port:\n      api:\n        default: 9999\n        public: 80\n\npod:\n  replicas:\n    skyline: 1\n  lifecycle:\n    
upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 1\n  security_context:\n    skyline:\n      pod:\n        runAsUser: 0\n      container:\n        skyline:\n          readOnlyRootFilesystem: false\n  mounts:\n    skyline:\n      init_container: null\n      skyline:\n        volumes:\n        volumeMounts:\n  tolerations:\n    skyline:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  resources:\n    enabled: false\n    skyline:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - skyline-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    skyline:\n      jobs:\n        - skyline-db-sync\n        - skyline-ks-user\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - skyline-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n\nconf:\n  
skyline:\n    default:\n      debug: true\n      log_dir: /var/log\n      log_file: /dev/stdout\n      # These two params are only available in the custom skyline image\n      access_log_file: /dev/stdout\n      error_log_file: /dev/stdout\n    openstack:\n      interface_type: internal\n      default_region: RegionOne\n  gunicorn: |\n    import multiprocessing\n    bind = \"unix:/var/lib/skyline/skyline.sock\"\n    workers = (1 + multiprocessing.cpu_count()) // 2\n    worker_class = \"uvicorn.workers.UvicornWorker\"\n    timeout = 300\n    keepalive = 5\n    reuse_port = False\n    proc_name = \"skyline\"\n    log_level = \"info\"\n    disable_redirect_access_to_syslog = True\n    access_logfile = \"-\"\n    error_logfile = \"-\"\n\nmanifests:\n  certificates: false\n  configmap_etc: true\n  configmap_bin: true\n  deployment: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  secret_db: true\n  secret_keystone: true\n  job_ks_user: true\n  service: true\n  ingress: true\n  service_ingress: true\n  secret_registry: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "swift/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\nname: swift\ndescription: Openstack-Helm Swift\nversion: 2025.2.0\nhome: https://docs.openstack.org/swift/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Swift/OpenStack_Project_Swift_vertical.jpg\nsources:\n  - https://opendev.org/openstack/swift\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: Openstack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "swift/templates/bin/_account-start.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"swift.bin.account_start\" }}\n#!/bin/bash\nset -ex\n\nexport HOME=/tmp\n\n# Wait for ring files\necho \"Waiting for account ring file...\"\nwhile [ ! -f /etc/swift/account.ring.gz ]; do\n    echo \"Account ring file not found, waiting...\"\n    sleep 5\ndone\n\necho \"Account ring file found\"\n\n# Create required directories\nmkdir -p /var/cache/swift /var/run/swift /var/log/swift /var/lock\n\n# Set permissions\nchown -R swift:swift /etc/swift /srv/node /var/cache/swift /var/run/swift /var/log/swift /var/lock 2>/dev/null || true\n\n# Start account services\necho \"Starting account services...\"\nswift-account-server /etc/swift/account-server.conf &\nswift-account-auditor /etc/swift/account-server.conf &\nswift-account-reaper /etc/swift/account-server.conf &\nswift-account-replicator /etc/swift/account-server.conf &\n\necho \"Swift account services started\"\n\n# Wait for any process to exit\nwait\n{{- end }}\n"
  },
  {
    "path": "swift/templates/bin/_bootstrap.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"swift.bin.bootstrap\" }}\n#!/bin/bash\nset -ex\n\necho \"Swift bootstrap started\"\n\n# Source credentials\nexport OS_AUTH_URL={{ tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\nexport OS_IDENTITY_API_VERSION=3\nexport OS_USERNAME={{ .Values.endpoints.identity.auth.admin.username }}\nexport OS_PASSWORD={{ .Values.endpoints.identity.auth.admin.password }}\nexport OS_PROJECT_NAME={{ .Values.endpoints.identity.auth.admin.project_name }}\nexport OS_USER_DOMAIN_NAME={{ .Values.endpoints.identity.auth.admin.user_domain_name }}\nexport OS_PROJECT_DOMAIN_NAME={{ .Values.endpoints.identity.auth.admin.project_domain_name }}\nexport OS_INTERFACE=internal\n\n# Wait for Swift proxy to be ready\nSWIFT_ENDPOINT={{ tuple \"object_store\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" }}\necho \"Waiting for Swift endpoint at ${SWIFT_ENDPOINT}...\"\ncount=0\nwhile ! curl -s -o /dev/null -w '%{http_code}' \"${SWIFT_ENDPOINT}/healthcheck\" | grep -q \"200\"; do\n    if [ $count -ge 60 ]; then\n        echo \"Timeout waiting for Swift endpoint\"\n        exit 1\n    fi\n    echo \"Waiting for Swift endpoint...\"\n    sleep 5\n    count=$((count+1))\ndone\n\necho \"Swift endpoint is healthy\"\n\n# Run any custom bootstrap script\n{{ .Values.bootstrap.script | default \"\" }}\n\necho \"Swift bootstrap complete\"\n{{- end }}\n"
  },
  {
    "path": "swift/templates/bin/_container-start.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"swift.bin.container_start\" }}\n#!/bin/bash\nset -ex\n\nexport HOME=/tmp\n\n# Wait for ring files\necho \"Waiting for container ring file...\"\nwhile [ ! -f /etc/swift/container.ring.gz ]; do\n    echo \"Container ring file not found, waiting...\"\n    sleep 5\ndone\n\necho \"Container ring file found\"\n\n# Create required directories\nmkdir -p /var/cache/swift /var/run/swift /var/log/swift /var/lock\n\n# Set permissions\nchown -R swift:swift /etc/swift /srv/node /var/cache/swift /var/run/swift /var/log/swift /var/lock 2>/dev/null || true\n\n# Start container services\necho \"Starting container services...\"\nswift-container-server /etc/swift/container-server.conf &\nswift-container-auditor /etc/swift/container-server.conf &\nswift-container-replicator /etc/swift/container-server.conf &\nswift-container-updater /etc/swift/container-server.conf &\nswift-container-sync /etc/swift/container-server.conf &\n\necho \"Swift container services started\"\n\n# Wait for any process to exit\nwait\n{{- end }}\n"
  },
  {
    "path": "swift/templates/bin/_ks-endpoints.sh.tpl",
    "content": "#!/bin/bash\nset -ex\n\n{{ include \"helm-toolkit.scripts.keystone_endpoints\" . }}\n"
  },
  {
    "path": "swift/templates/bin/_ks-service.sh.tpl",
    "content": "#!/bin/bash\nset -ex\n\n{{ include \"helm-toolkit.scripts.keystone_service\" . }}\n"
  },
  {
    "path": "swift/templates/bin/_ks-user.sh.tpl",
    "content": "#!/bin/bash\nset -ex\n\n{{ include \"helm-toolkit.scripts.keystone_user\" . }}\n"
  },
  {
    "path": "swift/templates/bin/_object-start.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"swift.bin.object_start\" }}\n#!/bin/bash\nset -ex\n\nexport HOME=/tmp\n\n# Wait for ring files\necho \"Waiting for object ring file...\"\nwhile [ ! -f /etc/swift/object.ring.gz ]; do\n    echo \"Object ring file not found, waiting...\"\n    sleep 5\ndone\n\necho \"Object ring file found\"\n\n# Create required directories\nmkdir -p /var/cache/swift /var/run/swift /var/log/swift /var/lock\n\n# Set permissions\nchown -R swift:swift /etc/swift /srv/node /var/cache/swift /var/run/swift /var/log/swift /var/lock 2>/dev/null || true\n\n# Start rsync daemon (object replication uses rsync)\necho \"Starting rsync daemon...\"\nrsync --daemon --config=/etc/swift/rsyncd.conf\n\n# Start object services\necho \"Starting object services...\"\nswift-object-server /etc/swift/object-server.conf &\nswift-object-auditor /etc/swift/object-server.conf &\nswift-object-replicator /etc/swift/object-server.conf &\nswift-object-updater /etc/swift/object-server.conf &\nswift-object-reconstructor /etc/swift/object-server.conf 2>/dev/null &\n\necho \"Swift object services started\"\n\n# Wait for any process to exit\nwait\n{{- end }}\n"
  },
  {
    "path": "swift/templates/bin/_proxy-start.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\n*/}}\n\n{{- define \"swift.bin.proxy_start\" }}\n#!/bin/bash\nset -ex\n\nexport HOME=/tmp\n\n# Wait for ring files\necho \"Waiting for ring files...\"\nwhile [ ! -f /etc/swift/account.ring.gz ] || [ ! -f /etc/swift/container.ring.gz ] || [ ! -f /etc/swift/object.ring.gz ]; do\n    echo \"Ring files not found, waiting...\"\n    sleep 5\ndone\n\necho \"Ring files found\"\nls -la /etc/swift/*.ring.gz\n\n# Create required directories\nmkdir -p /var/cache/swift /var/run/swift /var/log/swift\n\n# Set permissions\nchown -R swift:swift /etc/swift /var/cache/swift /var/run/swift /var/log/swift 2>/dev/null || true\n\n# Resolve DNS and add to /etc/hosts to work around eventlet DNS issues\necho \"Resolving service endpoints for eventlet compatibility...\"\n{{- $identityHost := tuple \"identity\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n{{- $cacheHost := tuple \"oslo_cache\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n\n# Resolve keystone\nKEYSTONE_HOST=\"{{ $identityHost }}\"\nif [ -n \"$KEYSTONE_HOST\" ] && [ \"$KEYSTONE_HOST\" != \"null\" ]; then\n    KEYSTONE_IP=$(getent hosts \"$KEYSTONE_HOST\" | awk '{print $1}' | head -1)\n    if [ -n \"$KEYSTONE_IP\" ]; then\n        echo \"$KEYSTONE_IP $KEYSTONE_HOST\" >> /etc/hosts\n        echo \"Added $KEYSTONE_IP $KEYSTONE_HOST to /etc/hosts\"\n    fi\nfi\n\n# Resolve memcached\nMEMCACHE_HOST=\"{{ $cacheHost }}\"\nif [ -n \"$MEMCACHE_HOST\" ] && [ \"$MEMCACHE_HOST\" != \"null\" ]; then\n    MEMCACHE_IP=$(getent hosts \"$MEMCACHE_HOST\" | awk '{print $1}' | head -1)\n    if [ -n \"$MEMCACHE_IP\" ]; then\n        echo \"$MEMCACHE_IP $MEMCACHE_HOST\" >> /etc/hosts\n        echo \"Added $MEMCACHE_IP $MEMCACHE_HOST to /etc/hosts\"\n    fi\nfi\n\necho \"Starting Swift Proxy Server...\"\nexec swift-proxy-server /etc/swift/proxy-server.conf --verbose\n{{- end }}\n"
  },
  {
    "path": "swift/templates/bin/_ring-builder.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\n*/}}\n\n{{- define \"swift.bin.ring_builder\" }}\n#!/bin/bash\nset -ex\n\nexport HOME=/tmp\n\ncd /etc/swift\n\nPARTITION_POWER={{ .Values.ring.partition_power }}\nREPLICAS={{ .Values.ring.replicas }}\nMIN_PART_HOURS={{ .Values.ring.min_part_hours }}\n\n# Get the storage service IP (for Kubernetes, use the storage service)\nSTORAGE_IP=${SWIFT_STORAGE_IP:-\"127.0.0.1\"}\n\necho \"Building Swift rings with partition_power=$PARTITION_POWER, replicas=$REPLICAS, min_part_hours=$MIN_PART_HOURS\"\necho \"Storage IP: $STORAGE_IP\"\n\n# Create ring builder files if they don't exist\nif [ ! -f account.builder ]; then\n    swift-ring-builder account.builder create $PARTITION_POWER $REPLICAS $MIN_PART_HOURS\nfi\n\nif [ ! -f container.builder ]; then\n    swift-ring-builder container.builder create $PARTITION_POWER $REPLICAS $MIN_PART_HOURS\nfi\n\nif [ ! -f object.builder ]; then\n    swift-ring-builder object.builder create $PARTITION_POWER $REPLICAS $MIN_PART_HOURS\nfi\n\n# Add devices from values\n{{- range $index, $device := .Values.ring.devices }}\nDEVICE_NAME=\"{{ $device.name }}\"\nDEVICE_WEIGHT=\"{{ $device.weight }}\"\n\n# Check if device already exists in account ring\nif ! swift-ring-builder account.builder search --ip $STORAGE_IP --device $DEVICE_NAME 2>/dev/null | grep -q \"$DEVICE_NAME\"; then\n    swift-ring-builder account.builder add \\\n        --region 1 --zone 1 --ip $STORAGE_IP --port 6202 \\\n        --device $DEVICE_NAME --weight $DEVICE_WEIGHT || true\nfi\n\n# Check if device already exists in container ring\nif ! swift-ring-builder container.builder search --ip $STORAGE_IP --device $DEVICE_NAME 2>/dev/null | grep -q \"$DEVICE_NAME\"; then\n    swift-ring-builder container.builder add \\\n        --region 1 --zone 1 --ip $STORAGE_IP --port 6201 \\\n        --device $DEVICE_NAME --weight $DEVICE_WEIGHT || true\nfi\n\n# Check if device already exists in object ring\nif ! swift-ring-builder object.builder search --ip $STORAGE_IP --device $DEVICE_NAME 2>/dev/null | grep -q \"$DEVICE_NAME\"; then\n    swift-ring-builder object.builder add \\\n        --region 1 --zone 1 --ip $STORAGE_IP --port 6200 \\\n        --device $DEVICE_NAME --weight $DEVICE_WEIGHT || true\nfi\n{{- end }}\n\n# Show ring status\necho \"Account Ring:\"\nswift-ring-builder account.builder\n\necho \"Container Ring:\"\nswift-ring-builder container.builder\n\necho \"Object Ring:\"\nswift-ring-builder object.builder\n\n# Rebalance rings\nswift-ring-builder account.builder rebalance || true\nswift-ring-builder container.builder rebalance || true\nswift-ring-builder object.builder rebalance || true\n\n# Copy ring files to shared location\ncp /etc/swift/*.ring.gz /etc/swift-rings/ 2>/dev/null || true\ncp /etc/swift/*.builder /etc/swift-rings/ 2>/dev/null || true\n\necho \"Ring files created successfully\"\nls -la /etc/swift/*.ring.gz /etc/swift/*.builder\n{{- end }}\n"
  },
  {
    "path": "swift/templates/bin/_ring-copy.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\n*/}}\n\n{{- define \"swift.bin.ring_copy\" }}\n#!/bin/bash\nset -ex\n\necho \"=== Swift Copy rings from shared storage ===\"\n\nfor base in account container object; do\n  if [[ ! -f /etc/swift/${base}.ring.gz ]]; then\n    echo \"Ring file /etc/swift/${base}.ring.gz not found in /etc/swift, attempting to copy from shared storage.\"\n    cp /etc/swift-rings/${base}.ring.gz /etc/swift/\n    echo \"Copied ${base}.ring.gz from shared storage.\"\n  fi\n  if [[ ! -f /etc/swift/${base}.builder ]]; then\n    echo \"Builder file /etc/swift/${base}.builder not found in /etc/swift, attempting to copy from shared storage.\"\n    cp /etc/swift-rings/${base}.builder /etc/swift/\n    echo \"Copied ${base}.builder from shared storage.\"\n  fi\ndone\n{{- end }}\n"
  },
  {
    "path": "swift/templates/bin/_storage-init.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\n*/}}\n\n{{- define \"swift.bin.storage_init\" }}\n#!/bin/bash\nset -e\n\nexport HOME=/tmp\n\necho \"=== Swift Storage Validation ===\"\n\n# Create swift user if it doesn't exist\ngetent group swift >/dev/null || groupadd -r swift\ngetent passwd swift >/dev/null || useradd -r -g swift -d /var/lib/swift -s /sbin/nologin swift\n\n# Create required directories\nmkdir -p /var/cache/swift /var/run/swift /var/lock /var/log/swift\n\n# Validate storage devices from values.yaml\nERRORS=0\n{{- range $device := .Values.ring.devices }}\nDEVICE_NAME=\"{{ $device.name }}\"\nSTOREDIR=\"/srv/node/${DEVICE_NAME}\"\n\necho \"Checking device: $DEVICE_NAME at $STOREDIR\"\n\nif [ ! -d \"$STOREDIR\" ]; then\n    echo \"ERROR: Storage directory $STOREDIR does not exist!\"\n    echo \"       Please mount your storage device to $STOREDIR before deploying Swift.\"\n    ERRORS=$((ERRORS + 1))\n    continue\nfi\n\n# Check if it's a mountpoint or at least writable\nif ! touch \"$STOREDIR/.swift_test\" 2>/dev/null; then\n    echo \"ERROR: Storage directory $STOREDIR is not writable!\"\n    ERRORS=$((ERRORS + 1))\n    continue\nfi\nrm -f \"$STOREDIR/.swift_test\"\n\necho \"  ✓ $STOREDIR is valid and writable\"\n{{- end }}\n\nif [ $ERRORS -gt 0 ]; then\n    echo \"\"\n    echo \"==========================================\"\n    echo \"STORAGE VALIDATION FAILED\"\n    echo \"==========================================\"\n    echo \"\"\n    echo \"Swift requires pre-mounted storage devices.\"\n    echo \"Please prepare your storage before deploying:\"\n    echo \"\"\n    echo \"For production (real disks):\"\n    echo \"  mkfs.xfs /dev/sdX\"\n    echo \"  mkdir -p /srv/node/sdX\"\n    echo \"  mount /dev/sdX /srv/node/sdX\"\n    echo \"\"\n    echo \"For testing (loop devices):\"\n    echo \"  truncate -s 1G /var/lib/swift/sdb1.img\"\n    echo \"  losetup /dev/loop0 /var/lib/swift/sdb1.img\"\n    echo \"  mkfs.xfs /dev/loop0\"\n    echo \"  mkdir -p /srv/node/sdb1\"\n    echo \"  mount /dev/loop0 /srv/node/sdb1\"\n    echo \"\"\n    exit 1\nfi\n\n# Set permissions\nchown -R swift:swift /srv/node /var/cache/swift /var/run/swift /var/lock /var/log/swift\nchmod -R 755 /srv/node /var/cache/swift\n\necho \"\"\necho \"=== Storage directories ===\"\nls -la /srv/node/\n\necho \"\"\necho \"=== Mount points ===\"\nmount | grep /srv/node || echo \"(No dedicated mounts found - using directory storage)\"\n\necho \"\"\necho \"Storage validation complete - all devices ready\"\n{{- end }}\n"
  },
  {
    "path": "swift/templates/bin/_storage-start.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\n*/}}\n\n{{- define \"swift.bin.storage_start\" }}\n#!/bin/bash\nset -ex\n\nexport HOME=/tmp\n\n# Wait for ring files\necho \"Waiting for ring files...\"\nwhile [ ! -f /etc/swift/account.ring.gz ] || [ ! -f /etc/swift/container.ring.gz ] || [ ! -f /etc/swift/object.ring.gz ]; do\n    echo \"Ring files not found, waiting...\"\n    sleep 5\ndone\n\necho \"Ring files found\"\nls -la /etc/swift/*.ring.gz\n\n# Create required directories\nmkdir -p /var/cache/swift /var/run/swift /var/log/swift /var/lock\n\n# Set permissions\nchown -R swift:swift /etc/swift /srv/node /var/cache/swift /var/run/swift /var/log/swift /var/lock 2>/dev/null || true\n\n# Start rsync daemon\necho \"Starting rsync daemon...\"\nrsync --daemon --config=/etc/swift/rsyncd.conf\n\n# Start account services\necho \"Starting account services...\"\nswift-account-server /etc/swift/account-server.conf &\nswift-account-auditor /etc/swift/account-server.conf &\nswift-account-reaper /etc/swift/account-server.conf &\nswift-account-replicator /etc/swift/account-server.conf &\n\n# Start container services\necho \"Starting container services...\"\nswift-container-server /etc/swift/container-server.conf &\nswift-container-auditor /etc/swift/container-server.conf &\nswift-container-replicator /etc/swift/container-server.conf &\nswift-container-updater /etc/swift/container-server.conf &\n\n# Start object services\necho \"Starting object services...\"\nswift-object-server /etc/swift/object-server.conf &\nswift-object-auditor /etc/swift/object-server.conf &\nswift-object-replicator /etc/swift/object-server.conf &\nswift-object-updater /etc/swift/object-server.conf &\n\necho \"All Swift storage services started\"\n\n# Wait for any process to exit\nwait\n{{- end }}\n"
  },
  {
    "path": "swift/templates/bin/_swift-test.sh.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\n*/}}\n\n{{- define \"swift.bin.swift_test\" }}\n#!/bin/bash\nset -ex\n\nexport HOME=/tmp\n\necho \"===== Swift Functional Test =====\"\n\n# Get authentication token\necho \"Getting Keystone token...\"\nTOKEN=$(openstack token issue -f value -c id)\necho \"Token obtained successfully\"\n\n# Get Swift endpoint\nSWIFT_URL=$(openstack endpoint list --service swift --interface public -f value -c URL | head -1)\necho \"Swift URL: $SWIFT_URL\"\n\n# Test Swift stat\necho \"\"\necho \"Testing swift stat...\"\nswift stat\n\n# Create test container\nCONTAINER=\"test-container-$(date +%s)\"\necho \"\"\necho \"Creating container: $CONTAINER\"\nswift post $CONTAINER\n\n# List containers\necho \"\"\necho \"Listing containers...\"\nswift list\n\n# Upload a test file\necho \"Hello from OpenStack Swift!\" > /tmp/testfile.txt\necho \"\"\necho \"Uploading test file...\"\nswift upload $CONTAINER /tmp/testfile.txt --object-name hello.txt\n\n# List objects in container\necho \"\"\necho \"Listing objects in $CONTAINER...\"\nswift list $CONTAINER\n\n# Download and verify\necho \"\"\necho \"Downloading test file...\"\nswift download $CONTAINER hello.txt -o /tmp/downloaded.txt\ncat /tmp/downloaded.txt\n\n# Cleanup\necho \"\"\necho \"Cleaning up...\"\nswift delete $CONTAINER hello.txt\nswift delete $CONTAINER\n\necho \"\"\necho \"===== Swift Test Complete =====\"\n{{- end }}\n"
  },
  {
    "path": "swift/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{/*\nCertificates template requires proper TLS configuration in endpoints.\nTo enable, you must set:\n  - manifests.certificates: true\n  - secrets.tls.object_store.api.public: <secret-name>\n  - endpoints.object_store.host_fqdn_override.default.tls (with crt, key, ca)\n*/}}\n{{- if .Values.manifests.certificates -}}\n{{- $secretName := .Values.secrets.tls.object_store.api.public -}}\n{{- $fqdnOverride := index .Values.endpoints.object_store.host_fqdn_override \"default\" | default dict -}}\n{{- if and $secretName (hasKey $fqdnOverride \"tls\") -}}\n{{ dict \"envAll\" . \"service\" \"object_store\" \"type\" \"public\" | include \"helm-toolkit.manifests.certificates\" }}\n{{- end -}}\n{{- end -}}\n"
  },
  {
    "path": "swift/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n   http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"swift.configmap.bin\" }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: swift-bin\n  labels:\n{{ tuple $envAll \"swift\" \"bin\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\ndata:\n  storage-init.sh: |\n{{ include \"swift.bin.storage_init\" . | indent 4 }}\n\n  ring-builder.sh: |\n{{ include \"swift.bin.ring_builder\" . | indent 4 }}\n\n  ring-copy.sh: |\n{{ include \"swift.bin.ring_copy\" . | indent 4 }}\n\n  proxy-start.sh: |\n{{ include \"swift.bin.proxy_start\" . | indent 4 }}\n\n  storage-start.sh: |\n{{ include \"swift.bin.storage_start\" . | indent 4 }}\n\n  account-start.sh: |\n{{ include \"swift.bin.account_start\" . | indent 4 }}\n\n  container-start.sh: |\n{{ include \"swift.bin.container_start\" . | indent 4 }}\n\n  object-start.sh: |\n{{ include \"swift.bin.object_start\" . | indent 4 }}\n\n  bootstrap.sh: |\n{{ include \"swift.bin.bootstrap\" . | indent 4 }}\n\n  ks-service.sh: |\n{{ tuple \"bin/_ks-service.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  ks-endpoints.sh: |\n{{ tuple \"bin/_ks-endpoints.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  ks-user.sh: |\n{{ tuple \"bin/_ks-user.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n\n  swift-test.sh: |\n{{ include \"swift.bin.swift_test\" . | indent 4 }}\n\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- end }}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- include \"swift.configmap.bin\" . }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n   http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"swift.configmap.etc\" }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.proxy_server.DEFAULT.swift_dir -}}\n{{- $_ := set .Values.conf.proxy_server.DEFAULT \"swift_dir\" \"/etc/swift\" -}}\n{{- end -}}\n\n{{- if empty (index .Values.conf.proxy_server \"filter:authtoken\" \"www_authenticate_uri\") -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set (index .Values.conf.proxy_server \"filter:authtoken\") \"www_authenticate_uri\" -}}\n{{- end -}}\n\n{{- if empty (index .Values.conf.proxy_server \"filter:authtoken\" \"auth_url\") -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set (index .Values.conf.proxy_server \"filter:authtoken\") \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty (index .Values.conf.proxy_server \"filter:authtoken\" \"memcached_servers\") -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set (index .Values.conf.proxy_server \"filter:authtoken\") \"memcached_servers\" -}}\n{{- end -}}\n\n{{- if empty (index .Values.conf.proxy_server \"filter:cache\" \"memcache_servers\") -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set (index .Values.conf.proxy_server \"filter:cache\") \"memcache_servers\" -}}\n{{- end -}}\n\n{{- if empty (index .Values.conf.proxy_server \"filter:authtoken\" \"password\") -}}\n{{- $_ := .Values.endpoints.identity.auth.swift.password | set (index .Values.conf.proxy_server \"filter:authtoken\") \"password\" -}}\n{{- end -}}\n\n{{- if empty (index .Values.conf.proxy_server \"filter:authtoken\" \"memcache_secret_key\") -}}\n{{- $_ := (default (randAlphaNum 64) .Values.endpoints.oslo_cache.auth.memcache_secret_key) | set (index .Values.conf.proxy_server \"filter:authtoken\") \"memcache_secret_key\" -}}\n{{- end -}}\n\n{{- if empty (index .Values.conf.proxy_server \"filter:authtoken\" \"memcache_servers\") -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set (index .Values.conf.proxy_server \"filter:authtoken\") \"memcache_servers\" -}}\n{{- end -}}\n\n\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: swift-etc\n  labels:\n{{ tuple $envAll \"swift\" \"etc\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\ndata:\n  swift.conf: |\n    [swift-hash]\n    swift_hash_path_suffix = {{ .Values.conf.swift.swift_hash_path_suffix }}\n    swift_hash_path_prefix = {{ .Values.conf.swift.swift_hash_path_prefix }}\n{{- range $policy := .Values.conf.swift.storage_policies }}\n    [storage-policy:{{ $policy.index }}]\n    name = {{ $policy.name }}\n{{- if $policy.default }}\n    default = {{ $policy.default }}\n{{- end }}\n{{- if $policy.deprecated }}\n    deprecated = {{ $policy.deprecated }}\n{{- end }}\n{{- end }}\n\n  proxy-server.conf: |\n{{ include \"helm-toolkit.utils.to_ini\" .Values.conf.proxy_server | indent 4 }}\n\n  account-server.conf: |\n{{ include \"helm-toolkit.utils.to_ini\" .Values.conf.account_server | indent 4 }}\n\n  container-server.conf: |\n{{ include \"helm-toolkit.utils.to_ini\" .Values.conf.container_server | indent 4 }}\n\n  object-server.conf: |\n{{ include \"helm-toolkit.utils.to_ini\" .Values.conf.object_server | indent 4 }}\n\n  rsyncd.conf: |\n    uid = {{ .Values.conf.rsyncd.uid }}\n    gid = {{ .Values.conf.rsyncd.gid }}\n    log file = {{ .Values.conf.rsyncd.log_file }}\n    pid file = {{ .Values.conf.rsyncd.pid_file }}\n    address = {{ .Values.conf.rsyncd.address }}\n\n    [account]\n    max connections = {{ .Values.conf.rsyncd.account.max_connections }}\n    path = {{ .Values.conf.rsyncd.account.path }}\n    read only = {{ .Values.conf.rsyncd.account.read_only }}\n    lock file = {{ .Values.conf.rsyncd.account.lock_file }}\n\n    [container]\n    max connections = {{ .Values.conf.rsyncd.container.max_connections }}\n    path = {{ .Values.conf.rsyncd.container.path }}\n    read only = {{ .Values.conf.rsyncd.container.read_only }}\n    lock file = {{ .Values.conf.rsyncd.container.lock_file }}\n\n    [object]\n    max connections = {{ .Values.conf.rsyncd.object.max_connections }}\n    path = {{ .Values.conf.rsyncd.object.path }}\n    read only = {{ .Values.conf.rsyncd.object.read_only }}\n    lock file = {{ .Values.conf.rsyncd.object.lock_file }}\n\n  container-sync-realms.conf: |\n{{ include \"helm-toolkit.utils.to_ini\" .Values.conf.swift.container_sync_realms | indent 4 }}\n{{- end }}\n\n\n{{- if .Values.manifests.configmap_etc }}\n{{- include \"swift.configmap.etc\" . }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/daemonset-storage.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.daemonset_storage }}\n{{- $envAll := . }}\n\n{{- $mounts_swift_storage := .Values.pod.mounts.swift_storage.swift_storage }}\n{{- $mounts_swift_storage_init := .Values.pod.mounts.swift_storage.init_container }}\n\n{{- $serviceAccountName := \"swift-storage\" }}\n{{ tuple $envAll \"storage\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: swift-storage\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"swift\" \"storage\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"swift\" \"storage\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll \"storage\" | include \"helm-toolkit.snippets.kubernetes_upgrades_daemonset\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"swift\" \"storage\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"swift-storage\" \"containerNames\" (list \"swift-account\" \"swift-container\" \"swift-object\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"swift\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      hostNetwork: true\n      hostPID: true\n      dnsPolicy: ClusterFirstWithHostNet\n      nodeSelector:\n        {{ .Values.labels.storage.node_selector_key }}: {{ .Values.labels.storage.node_selector_value }}\n{{- if .Values.pod.tolerations.swift.enabled }}\n{{ tuple $envAll \"swift\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.storage.timeout | default 30 }}\n      initContainers:\n{{ tuple $envAll \"storage\" $mounts_swift_storage_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: storage-init\n{{ tuple $envAll \"swift_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.storage | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"swift\" \"container\" \"swift_storage_init\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/storage-init.sh\n          terminationMessagePath: /tmp/termination-log\n          terminationMessagePolicy: File\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: swift-bin\n              mountPath: /tmp/storage-init.sh\n              subPath: storage-init.sh\n              readOnly: true\n            - name: srv-node\n              mountPath: /srv/node\n            - name: swift-data\n              mountPath: /var/lib/swift\n{{- if .Values.manifests.pvc }}\n        - name: ring-copy\n{{ tuple $envAll \"swift_storage_init\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.storage | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"swift\" \"container\" \"swift_ring_copy\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/ring-copy.sh\n          terminationMessagePath: /tmp/termination-log\n          terminationMessagePolicy: File\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: swift-bin\n              mountPath: /tmp/ring-copy.sh\n              subPath: ring-copy.sh\n              readOnly: true\n            - name: srv-node\n              mountPath: /srv/node\n            - name: swift-data\n              mountPath: /var/lib/swift\n            - name: swift-rings-host\n              mountPath: /etc/swift\n            - name: swift-rings\n              mountPath: {{ .Values.ring.shared_storage.mount_path }}\n{{- end }}\n      containers:\n        - name: swift-account\n{{ tuple $envAll \"swift_account\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.storage | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"swift\" \"container\" \"swift_account\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/account-start.sh\n          ports:\n            - name: account\n              containerPort: {{ .Values.conf.account_server.DEFAULT.bind_port }}\n              hostPort: {{ .Values.conf.account_server.DEFAULT.bind_port }}\n          readinessProbe:\n            exec:\n              command:\n                - /bin/bash\n                - -c\n                - \"pgrep -f swift-account-server\"\n            initialDelaySeconds: 30\n            periodSeconds: 15\n          livenessProbe:\n            exec:\n              command:\n                - /bin/bash\n                - -c\n                - \"pgrep -f swift-account-server\"\n            initialDelaySeconds: 30\n            periodSeconds: 30\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: swift-bin\n              mountPath: /tmp/account-start.sh\n              subPath: account-start.sh\n              readOnly: true\n            - name: swift-etc\n              mountPath: /etc/swift/swift.conf\n              subPath: swift.conf\n              readOnly: true\n            - name: swift-etc\n              mountPath: /etc/swift/account-server.conf\n              subPath: account-server.conf\n              readOnly: true\n            - name: srv-node\n              mountPath: /srv/node\n            - name: swift-rings-host\n              mountPath: /etc/swift/account.ring.gz\n              subPath: account.ring.gz\n              readOnly: true\n{{- if $mounts_swift_storage.volumeMounts }}{{ toYaml $mounts_swift_storage.volumeMounts | indent 12 }}{{ end }}\n        - name: swift-container\n{{ tuple $envAll \"swift_container\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.storage | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"swift\" \"container\" \"swift_container\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/container-start.sh\n          ports:\n            - name: container\n              containerPort: {{ .Values.conf.container_server.DEFAULT.bind_port }}\n              hostPort: {{ .Values.conf.container_server.DEFAULT.bind_port }}\n          readinessProbe:\n            exec:\n              command:\n                - /bin/bash\n                - -c\n                - \"pgrep -f swift-container-server\"\n            initialDelaySeconds: 30\n            periodSeconds: 15\n          livenessProbe:\n            exec:\n              command:\n                - /bin/bash\n                - -c\n                - \"pgrep -f swift-container-server\"\n            initialDelaySeconds: 30\n            periodSeconds: 30\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: swift-bin\n              mountPath: /tmp/container-start.sh\n              subPath: container-start.sh\n              readOnly: true\n            - name: swift-etc\n              mountPath: /etc/swift/swift.conf\n              subPath: swift.conf\n              readOnly: true\n            - name: swift-etc\n              mountPath: /etc/swift/container-server.conf\n              subPath: container-server.conf\n              readOnly: true\n            - name: srv-node\n              mountPath: /srv/node\n            - name: swift-rings-host\n              mountPath: /etc/swift/container.ring.gz\n              subPath: container.ring.gz\n              readOnly: true\n{{- if $mounts_swift_storage.volumeMounts }}{{ toYaml $mounts_swift_storage.volumeMounts | indent 12 }}{{ end }}\n        - name: swift-object\n{{ tuple $envAll \"swift_object\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.storage | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"swift\" \"container\" \"swift_object\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/object-start.sh\n          ports:\n            - name: object\n              containerPort: {{ .Values.conf.object_server.DEFAULT.bind_port }}\n              hostPort: {{ .Values.conf.object_server.DEFAULT.bind_port }}\n            - name: rsync\n              containerPort: 873\n              hostPort: 873\n          readinessProbe:\n            exec:\n              command:\n                - /bin/bash\n                - -c\n                - \"pgrep -f swift-object-server\"\n            initialDelaySeconds: 30\n            periodSeconds: 15\n          livenessProbe:\n            exec:\n              command:\n                - /bin/bash\n                - -c\n                - \"pgrep -f swift-object-server && pgrep -f rsync\"\n            initialDelaySeconds: 30\n            periodSeconds: 30\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: swift-bin\n              mountPath: /tmp/object-start.sh\n              subPath: object-start.sh\n              readOnly: true\n            - name: swift-etc\n              mountPath: /etc/swift/swift.conf\n              subPath: swift.conf\n              readOnly: true\n            - name: swift-etc\n              mountPath: /etc/swift/object-server.conf\n              subPath: object-server.conf\n              readOnly: true\n            - name: swift-etc\n              mountPath: /etc/swift/rsyncd.conf\n              subPath: rsyncd.conf\n              readOnly: true\n            - name: srv-node\n              mountPath: /srv/node\n            - name: swift-rings-host\n              mountPath: /etc/swift/object.ring.gz\n              subPath: object.ring.gz\n              readOnly: true\n{{- if $mounts_swift_storage.volumeMounts }}{{ toYaml $mounts_swift_storage.volumeMounts | indent 12 }}{{ end }}\n      
volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: swift-bin\n          configMap:\n            name: swift-bin\n            defaultMode: 0555\n        - name: swift-etc\n          configMap:\n            name: swift-etc\n            defaultMode: 0444\n        - name: srv-node\n          hostPath:\n            path: /srv/node\n            type: DirectoryOrCreate\n        - name: swift-data\n          hostPath:\n            path: /var/lib/swift\n            type: DirectoryOrCreate\n        - name: swift-rings-host\n          hostPath:\n            path: /etc/swift\n            type: DirectoryOrCreate\n{{- if .Values.manifests.pvc }}\n        - name: swift-rings\n          persistentVolumeClaim:\n            claimName: {{ .Values.ring.shared_storage.name }}\n{{- end }}\n{{- if $mounts_swift_storage.volumes }}{{ toYaml $mounts_swift_storage.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/deployment-proxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_proxy }}\n{{- $envAll := . }}\n\n{{- $mounts_swift_proxy := .Values.pod.mounts.swift_proxy.swift_proxy }}\n{{- $mounts_swift_proxy_init := .Values.pod.mounts.swift_proxy.init_container }}\n\n{{- $serviceAccountName := \"swift-proxy\" }}\n{{ tuple $envAll \"proxy\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: swift-proxy\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"swift\" \"proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.proxy }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"swift\" \"proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"swift\" \"proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"swift-proxy\" \"containerNames\" (list \"swift-proxy\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"swift\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      dnsPolicy: ClusterFirst\n      dnsConfig:\n        options:\n          - name: ndots\n            value: \"2\"\n          - name: single-request-reopen\n      affinity:\n{{ tuple $envAll \"swift\" \"proxy\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n{{- if .Values.pod.tolerations.swift.enabled }}\n{{ tuple $envAll \"swift\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.proxy.timeout | default 30 }}\n      initContainers:\n{{ tuple $envAll \"proxy\" $mounts_swift_proxy_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: swift-proxy\n{{ tuple $envAll \"swift_proxy\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.proxy | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"swift\" \"container\" \"swift_proxy\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/proxy-start.sh\n          ports:\n            - name: swift-api\n              containerPort: {{ .Values.conf.proxy_server.DEFAULT.bind_port }}\n          readinessProbe:\n        
    httpGet:\n              path: /healthcheck\n              port: {{ .Values.conf.proxy_server.DEFAULT.bind_port }}\n            initialDelaySeconds: 15\n            periodSeconds: 10\n          livenessProbe:\n            httpGet:\n              path: /healthcheck\n              port: {{ .Values.conf.proxy_server.DEFAULT.bind_port }}\n            initialDelaySeconds: 30\n            periodSeconds: 30\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: swift-bin\n              mountPath: /tmp/proxy-start.sh\n              subPath: proxy-start.sh\n              readOnly: true\n            - name: swift-etc\n              mountPath: /etc/swift/swift.conf\n              subPath: swift.conf\n              readOnly: true\n            - name: swift-etc\n              mountPath: /etc/swift/proxy-server.conf\n              subPath: proxy-server.conf\n              readOnly: true\n            - name: swift-etc\n              mountPath: /etc/swift/container-sync-realms.conf\n              subPath: container-sync-realms.conf\n              readOnly: true\n            - name: swift-rings\n              mountPath: /etc/swift/account.ring.gz\n              subPath: account.ring.gz\n              readOnly: true\n            - name: swift-rings\n              mountPath: /etc/swift/container.ring.gz\n              subPath: container.ring.gz\n              readOnly: true\n            - name: swift-rings\n              mountPath: /etc/swift/object.ring.gz\n              subPath: object.ring.gz\n              readOnly: true\n{{- if $mounts_swift_proxy.volumeMounts }}{{ toYaml $mounts_swift_proxy.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: swift-bin\n          configMap:\n            name: swift-bin\n            defaultMode: 0555\n        - name: swift-etc\n          configMap:\n            name: swift-etc\n            defaultMode: 0444\n        - name: 
swift-rings\n          hostPath:\n            path: /etc/swift\n            type: DirectoryOrCreate\n{{- if $mounts_swift_proxy.volumes }}{{ toYaml $mounts_swift_proxy.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "swift/templates/ingress-proxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_proxy .Values.network.proxy.ingress.public }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"proxy\" \"backendServiceType\" \"object_store\" \"backendPort\" \"swift-api\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.object_store.api.public -}}\n{{- if and .Values.manifests.certificates $secretName -}}\n{{- $fqdnOverride := index .Values.endpoints.object_store.host_fqdn_override \"default\" | default dict -}}\n{{- if and $fqdnOverride (hasKey $fqdnOverride \"tls\") -}}\n{{- $tlsConfig := index $fqdnOverride \"tls\" | default dict -}}\n{{- if and $tlsConfig (hasKey $tlsConfig \"issuerRef\") -}}\n{{- $issuerRef := index $tlsConfig \"issuerRef\" | default dict -}}\n{{- if and $issuerRef (hasKey $issuerRef \"name\") -}}\n{{- $_ := set $ingressOpts \"certIssuer\" $issuerRef.name -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_bootstrap }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"swift-bootstrap\" }}\n{{ tuple $envAll \"bootstrap\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: swift-bootstrap\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"swift\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"swift\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"swift\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{- if .Values.pod.tolerations.swift.enabled }}\n{{ tuple $envAll \"swift\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end }}\n      initContainers:\n{{ tuple $envAll \"bootstrap\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: swift-bootstrap\n{{ tuple 
$envAll \"bootstrap\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"swift\" \"container\" \"bootstrap\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/bootstrap.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: swift-bin\n              mountPath: /tmp/bootstrap.sh\n              subPath: bootstrap.sh\n              readOnly: true\n          env:\n{{- with $env := dict \"ksUserSecret\" \"swift-keystone-admin\" \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: swift-bin\n          configMap:\n            name: swift-bin\n            defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "swift/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"swift\" \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.swift.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"swift\" \"serviceTypes\" ( tuple \"object-store\" ) -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) }}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"swift\" \"serviceTypes\" ( tuple \"object-store\" ) -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) }}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"swift\" -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) }}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/job-ring-builder.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ring_builder }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"swift-ring-builder\" }}\n{{ tuple $envAll \"ring_builder\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: swift-ring-builder\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"swift\" \"ring-builder\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"swift\" \"ring-builder\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n{{ dict \"envAll\" $envAll \"application\" \"swift\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: OnFailure\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n{{- if .Values.pod.tolerations.swift.enabled }}\n{{ tuple $envAll \"swift\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{- end }}\n      initContainers:\n{{ tuple $envAll \"ring_builder\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: 
ring-builder\n{{ tuple $envAll \"swift_ring_builder\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ring_builder | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"swift\" \"container\" \"swift_ring_builder\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n            - name: SWIFT_STORAGE_IP\n              valueFrom:\n                fieldRef:\n                  fieldPath: status.hostIP\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/ring-builder.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: swift-bin\n              mountPath: /tmp/ring-builder.sh\n              subPath: ring-builder.sh\n              readOnly: true\n            - name: swift-rings-host\n              mountPath: /etc/swift\n            - name: swift-etc\n              mountPath: /etc/swift/swift.conf\n              subPath: swift.conf\n              readOnly: true\n{{- if .Values.manifests.pvc }}\n            - name: swift-rings\n              mountPath: {{ .Values.ring.shared_storage.mount_path }}\n{{- end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: swift-bin\n          configMap:\n            name: swift-bin\n            defaultMode: 0555\n        - name: swift-etc\n          configMap:\n            name: swift-etc\n            defaultMode: 0444\n        - name: swift-rings-host\n          hostPath:\n            path: /etc/swift\n            type: DirectoryOrCreate\n{{- if .Values.manifests.pvc }}\n        - name: swift-rings\n          persistentVolumeClaim:\n            claimName: {{ .Values.ring.shared_storage.name }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy }}\n{{- $envAll := . }}\n---\napiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  name: swift-network-policy\n  labels:\n{{ tuple $envAll \"swift\" \"proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  podSelector:\n    matchLabels:\n{{ tuple $envAll \"swift\" \"proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  policyTypes:\n    - Ingress\n    - Egress\n  ingress:\n{{ toYaml .Values.network_policy.swift.ingress | indent 4 }}\n  egress:\n{{ toYaml .Values.network_policy.swift.egress | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/pdb-proxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_proxy }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: swift-proxy-pdb\n  labels:\n{{ tuple $envAll \"swift\" \"proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  minAvailable: {{ .Values.pod.pdb.proxy.minAvailable | default 1 }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"swift\" \"proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/pdb-storage.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_storage }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: swift-storage-pdb\n  labels:\n{{ tuple $envAll \"swift\" \"storage\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  minAvailable: {{ .Values.pod.pdb.storage.minAvailable | default 1 }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"swift\" \"storage\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/pod-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\n*/}}\n\n{{- if .Values.manifests.pod_test }}\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"swift-test\" }}\n{{ tuple $envAll \"test\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{ .Release.Name }}-test\"\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n    \"helm.sh/hook\": test-success\n  labels:\n{{ tuple $envAll \"swift\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceAccountName: {{ $serviceAccountName }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  restartPolicy: Never\n  initContainers:\n{{ tuple $envAll \"test\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: swift-test\n      image: {{ .Values.images.tags.test }}\n      imagePullPolicy: {{ .Values.images.pull_policy }}\n      command:\n        - /bin/bash\n        - -c\n        - /tmp/swift-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: swift-bin\n          mountPath: /tmp/swift-test.sh\n          subPath: swift-test.sh\n          readOnly: true\n      env:\n{{- with $env := dict \"ksUserSecret\" \"swift-keystone-user\" \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SWIFT_TEST_CONTAINER\n          value: \"test-container-{{ randAlphaNum 8 | lower }}\"\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: swift-bin\n      configMap:\n        name: swift-bin\n        defaultMode: 0555\n{{- end }}\n"
  },
  {
    "path": "swift/templates/pvc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{ define \"swift.pvc\" }}\n{{- $name := index . 0 }}\n{{- $size := index . 1 }}\n{{- $storageClassName := index . 2 }}\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: {{ $name }}\nspec:\n  accessModes:\n    - \"ReadWriteMany\"\n  resources:\n    requests:\n      storage: {{ $size }}\n  storageClassName: {{ $storageClassName }}\n{{- end }}\n\n{{- if .Values.manifests.pvc }}\n{{ tuple .Values.ring.shared_storage.name .Values.ring.shared_storage.size .Values.ring.shared_storage.storageClassName | include \"swift.pvc\" }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"swift\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/service-ingress-proxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_proxy .Values.network.proxy.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"object_store\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "swift/templates/service-proxy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_proxy }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"object_store\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n  labels:\n{{ tuple $envAll \"swift\" \"proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n{{- if .Values.network.proxy.node_port.enabled }}\n  type: NodePort\n{{- if .Values.network.proxy.external_policy_local }}\n  externalTrafficPolicy: Local\n{{- end }}\n{{- else }}\n  type: ClusterIP\n{{- end }}\n  selector:\n{{ tuple $envAll \"swift\" \"proxy\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  ports:\n    - name: swift-api\n      port: {{ .Values.endpoints.object_store.port.api.default }}\n      targetPort: {{ .Values.conf.proxy_server.DEFAULT.bind_port }}\n{{- if .Values.network.proxy.node_port.enabled }}\n      nodePort: {{ .Values.network.proxy.node_port.port }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "swift/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nrelease_group: null\n\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  storage:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    test: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    swift_proxy: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    swift_account: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    swift_container: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    swift_object: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    swift_storage: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    swift_storage_init: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    swift_ring_builder: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    ks_user: quay.io/airshipit/heat:2025.2-ubuntu_noble\n    ks_service: quay.io/airshipit/heat:2025.2-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/heat:2025.2-ubuntu_noble\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: Always\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - 
image_repo_sync\n\npod:\n  security_context:\n    swift:\n      pod:\n        runAsUser: 0\n      container:\n        swift_proxy:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: true\n          privileged: true\n        swift_account:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: true\n          privileged: true\n        swift_container:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: true\n          privileged: true\n        swift_object:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: true\n          privileged: true\n        swift_storage_init:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: true\n          privileged: true\n        swift_ring_builder:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: true\n          privileged: true\n        swift_ring_copy:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: true\n          privileged: true\n        bootstrap:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n  replicas:\n    # Number of proxy replicas (Deployment)\n    proxy: 3\n    # Note: storage uses DaemonSet, so this is not used.\n    # Storage pods run on all nodes matching the storage node selector.\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n      daemonsets:\n        storage:\n          enabled: true\n          min_ready_seconds: 0\n          max_unavailable: 1\n    termination_grace_period:\n      proxy:\n        timeout: 30\n      storage:\n        timeout: 30\n  resources:\n    enabled: 
true\n    proxy:\n      requests:\n        memory: \"256Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    storage:\n      requests:\n        memory: \"256Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ring_builder:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  tolerations:\n    swift:\n      enabled: false\n      tolerations:\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n  mounts:\n    swift_proxy:\n      init_container: null\n      swift_proxy:\n        volumeMounts:\n        volumes:\n    swift_storage:\n      init_container: null\n      swift_storage:\n        volumeMounts:\n        volumes:\n  pdb:\n    proxy:\n      minAvailable: 1\n    storage:\n      minAvailable: 1\n\nnetwork_policy:\n  swift:\n    ingress:\n     
 - {}\n    egress:\n      - {}\n\nnetwork:\n  proxy:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        nginx.ingress.kubernetes.io/proxy-body-size: \"0\"\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30808\n\nbootstrap:\n  enabled: false\n  script: null\n\n# Ring Configuration\n# partition_power: 2^N partitions (10=1024, 14=16384)\n# replicas: number of data copies (production=3, minimum for HA)\n# min_part_hours: minimum hours between partition moves (production=24)\n# devices: list of storage devices used by Swift\n#   - name: device name (must match mountpoint under /srv/node/)\n#   - weight: relative capacity weight (100 = standard, higher = more data)\n#\n# IMPORTANT: Devices must be pre-mounted before deploying Swift.\n# For production: mount real block devices (e.g., /dev/sdb1 -> /srv/node/sdb1)\n# For development: use loop devices or directories\n#\n# Example production config:\n#   devices:\n#     - name: sdb1\n#       weight: 100\n#     - name: sdc1\n#       weight: 100\n#\nring:\n  partition_power: 10\n  replicas: 3\n  min_part_hours: 24\n  devices: []\n  # Example:\n  # devices:\n  #   - name: sdb1\n  #     weight: 100\n  #   - name: sdb2\n  #     weight: 100\n  shared_storage:\n    name: swift-shared-storage\n    storageClassName: nfs-provisioner\n    size: 1Gi\n    mount_path: \"/etc/swift-rings\"\n\nconf:\n  swift:\n    swift_hash_path_suffix: CHANGE_ME_SUFFIX\n    swift_hash_path_prefix: CHANGE_ME_PREFIX\n    storage_policies:\n      - name: Policy-0\n        index: 0\n        default: \"yes\"\n\n    container_sync_realms:\n      DEFAULT:\n        mtime_check_interval: 300\n\n  proxy_server:\n    DEFAULT:\n      bind_ip: 0.0.0.0\n      bind_port: 8080\n      workers: 2\n      user: swift\n      swift_dir: /etc/swift\n      log_level: INFO\n      
log_name: proxy-server\n      log_facility:\n      log_address:\n    loggers:\n      keys: root,swift\n    handlers:\n      keys: console\n    formatters:\n      keys: simple\n    logger_root:\n      level: INFO\n      handlers: console\n    logger_swift:\n      level: INFO\n      handlers: console\n      qualname: swift\n      propagate: 0\n    handler_console:\n      class: StreamHandler\n      level: INFO\n      formatter: simple\n      args: (sys.stdout,)\n    formatter_simple:\n      format: \"%%(asctime)s %%(levelname)s [%%(process)d] %%(name)s: %%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    \"pipeline:main\":\n      pipeline: catch_errors gatekeeper healthcheck proxy-logging cache listing_formats container_sync bulk ratelimit authtoken keystoneauth copy container-quotas account-quotas slo dlo versioned_writes symlink proxy-logging proxy-server\n    \"app:proxy-server\":\n      use: egg:swift#proxy\n      account_autocreate: \"true\"\n      allow_account_management: \"true\"\n    \"filter:authtoken\":\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n      delay_auth_decision: \"True\"\n      www_authenticate_uri: null\n      auth_url: null\n      auth_type: password\n      memcached_servers: null\n      memcache_security_strategy: ENCRYPT\n      memcache_secret_key: null\n      username: swift\n      password: null\n      project_name: service\n      user_domain_name: default\n      project_domain_name: default\n      service_token_roles_required: \"true\"\n    \"filter:keystoneauth\":\n      use: egg:swift#keystoneauth\n      operator_roles: admin,member,swiftoperator\n      reseller_prefix: AUTH_\n      reseller_admin_role: ResellerAdmin\n    \"filter:healthcheck\":\n      use: egg:swift#healthcheck\n    \"filter:cache\":\n      use: egg:swift#memcache\n      memcache_servers: null\n    \"filter:account-quotas\":\n      use: egg:swift#account_quotas\n    \"filter:container-quotas\":\n      use: 
egg:swift#container_quotas\n    \"filter:proxy-logging\":\n      use: egg:swift#proxy_logging\n    \"filter:bulk\":\n      use: egg:swift#bulk\n    \"filter:slo\":\n      use: egg:swift#slo\n    \"filter:dlo\":\n      use: egg:swift#dlo\n    \"filter:versioned_writes\":\n      use: egg:swift#versioned_writes\n      allow_versioned_writes: \"true\"\n    \"filter:copy\":\n      use: egg:swift#copy\n    \"filter:container_sync\":\n      use: egg:swift#container_sync\n    \"filter:ratelimit\":\n      use: egg:swift#ratelimit\n    \"filter:catch_errors\":\n      use: egg:swift#catch_errors\n    \"filter:gatekeeper\":\n      use: egg:swift#gatekeeper\n    \"filter:listing_formats\":\n      use: egg:swift#listing_formats\n    \"filter:symlink\":\n      use: egg:swift#symlink\n\n  account_server:\n    DEFAULT:\n      bind_ip: 0.0.0.0\n      bind_port: 6202\n      workers: 2\n      user: swift\n      swift_dir: /etc/swift\n      devices: /srv/node\n      mount_check: \"true\"\n      log_level: INFO\n      log_name: account-server\n      log_facility:\n      log_address:\n    loggers:\n      keys: root,swift\n    handlers:\n      keys: console\n    formatters:\n      keys: simple\n    logger_root:\n      level: INFO\n      handlers: console\n    logger_swift:\n      level: INFO\n      handlers: console\n      qualname: swift\n      propagate: 0\n    handler_console:\n      class: StreamHandler\n      level: INFO\n      formatter: simple\n      args: (sys.stdout,)\n    formatter_simple:\n      format: \"%%(asctime)s %%(levelname)s [%%(process)d] %%(name)s: %%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    \"pipeline:main\":\n      pipeline: healthcheck recon account-server\n    \"app:account-server\":\n      use: egg:swift#account\n    \"filter:healthcheck\":\n      use: egg:swift#healthcheck\n    \"filter:recon\":\n      use: egg:swift#recon\n      recon_cache_path: /var/cache/swift\n    \"account-replicator\":\n      concurrency: 2\n    \"account-auditor\": {}\n    
\"account-reaper\": {}\n\n  container_server:\n    DEFAULT:\n      bind_ip: 0.0.0.0\n      bind_port: 6201\n      workers: 2\n      user: swift\n      swift_dir: /etc/swift\n      devices: /srv/node\n      mount_check: \"true\"\n      log_level: INFO\n      log_name: container-server\n      log_facility:\n      log_address:\n    loggers:\n      keys: root,swift\n    handlers:\n      keys: console\n    formatters:\n      keys: simple\n    logger_root:\n      level: INFO\n      handlers: console\n    logger_swift:\n      level: INFO\n      handlers: console\n      qualname: swift\n      propagate: 0\n    handler_console:\n      class: StreamHandler\n      level: INFO\n      formatter: simple\n      args: (sys.stdout,)\n    formatter_simple:\n      format: \"%%(asctime)s %%(levelname)s [%%(process)d] %%(name)s: %%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    \"pipeline:main\":\n      pipeline: healthcheck recon container-server\n    \"app:container-server\":\n      use: egg:swift#container\n    \"filter:healthcheck\":\n      use: egg:swift#healthcheck\n    \"filter:recon\":\n      use: egg:swift#recon\n      recon_cache_path: /var/cache/swift\n    \"container-replicator\":\n      concurrency: 2\n    \"container-updater\":\n      concurrency: 2\n    \"container-auditor\": {}\n    \"container-sync\": {}\n\n  object_server:\n    DEFAULT:\n      bind_ip: 0.0.0.0\n      bind_port: 6200\n      workers: 2\n      user: swift\n      swift_dir: /etc/swift\n      devices: /srv/node\n      mount_check: \"true\"\n      log_level: INFO\n      log_name: object-server\n      log_facility:\n      log_address:\n    loggers:\n      keys: root,swift\n    handlers:\n      keys: console\n    formatters:\n      keys: simple\n    logger_root:\n      level: INFO\n      handlers: console\n    logger_swift:\n      level: INFO\n      handlers: console\n      qualname: swift\n      propagate: 0\n    handler_console:\n      class: StreamHandler\n      level: INFO\n      formatter: 
simple\n      args: (sys.stdout,)\n    formatter_simple:\n      format: \"%%(asctime)s %%(levelname)s [%%(process)d] %%(name)s: %%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    \"pipeline:main\":\n      pipeline: healthcheck recon object-server\n    \"app:object-server\":\n      use: egg:swift#object\n    \"filter:healthcheck\":\n      use: egg:swift#healthcheck\n    \"filter:recon\":\n      use: egg:swift#recon\n      recon_cache_path: /var/cache/swift\n      recon_lock_path: /var/lock\n    \"object-replicator\":\n      concurrency: 2\n    \"object-updater\":\n      concurrency: 2\n    \"object-auditor\": {}\n\n  rsyncd:\n    uid: swift\n    gid: swift\n    log_file: /var/log/rsyncd.log\n    pid_file: /var/run/rsyncd.pid\n    address: 0.0.0.0\n    account:\n      max_connections: 4\n      path: /srv/node/\n      read_only: \"False\"\n      lock_file: /var/lock/account.lock\n    container:\n      max_connections: 4\n      path: /srv/node/\n      read_only: \"False\"\n      lock_file: /var/lock/container.lock\n    object:\n      max_connections: 4\n      path: /srv/node/\n      read_only: \"False\"\n      lock_file: /var/lock/object.lock\n\nsecrets:\n  identity:\n    admin: swift-keystone-admin\n    swift: swift-keystone-user\n  tls:\n    object_store:\n      api:\n        public: swift-tls-public\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - swift-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    ring_builder:\n      jobs: null\n      services: null\n    # storage_init:\n    #   jobs:\n    #     - swift-ring-builder\n    #   services: null\n    storage:\n      # jobs:\n      #   - swift-storage-init\n      services: null\n    proxy:\n      daemonset:\n        - swift-storage\n      # jobs:\n      #   - swift-storage-init\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n        
  service: oslo_cache\n    ks_endpoints:\n      jobs:\n        - swift-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    bootstrap:\n      jobs:\n        - swift-ks-user\n        - swift-ks-endpoints\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: object_store\n    tests:\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: object_store\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      swift:\n        role: admin\n        region_name: RegionOne\n        username: swift\n        password: password\n        project_name: service\n        user_domain_name: default\n        project_domain_name: default\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  object_store:\n    name: swift\n    hosts:\n      default: swift-proxy\n      public: swift\n    host_fqdn_override:\n      default: null\n      # NOTE: This chart supports TLS for FQDN overridden public\n      # endpoints using the 
following format:\n      # public:\n      #   host: swift.example.com\n      #   tls:\n      #     crt: |\n      #       <certificate content>\n      #     key: |\n      #       <key content>\n      #     ca: |\n      #       <ca certificate content>\n    path:\n      default: /v1/AUTH_%(tenant_id)s\n    scheme:\n      default: http\n      # Set to 'https' when TLS is enabled\n      # public: https\n    port:\n      api:\n        default: 8080\n        public: 80\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  deployment_proxy: true\n  daemonset_storage: true\n  job_bootstrap: false\n  job_image_repo_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_ring_builder: true\n  pdb_proxy: true\n  pdb_storage: true\n  secret_keystone: true\n  ingress_proxy: true\n  service_ingress_proxy: true\n  service_proxy: true\n  network_policy: false\n  pod_test: false\n  certificates: false\n  pvc: true\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               
- path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "tacker/.helmignore",
    "content": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation (prefixed with !). Only one pattern per line.\n.DS_Store\n# Common VCS dirs\n.git/\n.gitignore\n.bzr/\n.bzrignore\n.hg/\n.hgignore\n.svn/\n# Common backup files\n*.swp\n*.bak\n*.tmp\n*.orig\n*~\n# Various IDEs\n.project\n.idea/\n*.tmproj\n.vscode/\n"
  },
  {
    "path": "tacker/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Tacker\nname: tacker\nversion: 2025.2.0\nhome: https://docs.openstack.org/tacker/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Tacker/OpenStack_Project_Tacker_vertical.png\nsources:\n  - https://opendev.org/openstack/tacker\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "tacker/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\ntacker-db-manage --config-file /etc/tacker/tacker.conf upgrade head\n"
  },
  {
    "path": "tacker/templates/bin/_tacker-test.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nopenstack secret list\n\nopenstack --help\n\nopenstack vim list\n\nopenstack vnflcm list --os-tacker-api-version 1\n\n"
  },
  {
    "path": "tacker/templates/bin/_tacker_conductor.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\npip install python-cinderclient\npip install retrying\npip install boto3\napt update\napt install curl -y -f --install-suggests\ncurl -o /tmp/helm.tar.gz https://get.helm.sh/helm-v3.11.2-linux-amd64.tar.gz\ntar zxf /tmp/helm.tar.gz -C /tmp/;mv /tmp/linux-amd64/helm /usr/local/bin/helm\ntacker-conductor --config-file /etc/tacker/tacker.conf\n"
  },
  {
    "path": "tacker/templates/bin/_tacker_server.sh.tpl",
    "content": "#!/bin/bash\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\npip install python-cinderclient\npip install retrying\npip install boto3\napt update\napt install curl -y -f --install-suggests\ncurl -o /tmp/helm.tar.gz https://get.helm.sh/helm-v3.11.2-linux-amd64.tar.gz\ntar zxf /tmp/helm.tar.gz -C /tmp/;mv /tmp/linux-amd64/helm /usr/local/bin/helm\ntacker-server --config-file /etc/tacker/tacker.conf\n"
  },
  {
    "path": "tacker/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\ndata:\n  tacker-server.sh: |\n{{ tuple \"bin/_tacker_server.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  tacker-conductor.sh: |\n{{ tuple \"bin/_tacker_conductor.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  tacker-test.sh: |\n{{ tuple \"bin/_tacker-test.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" .| indent 4 }}\nkind: ConfigMap\nmetadata:\n  name: tacker-bin\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.tacker.database.connection)) (empty .Values.conf.tacker.database.connection) -}}\n{{- $connection := tuple \"oslo_db\" \"internal\" \"tacker\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | set .Values.conf.tacker.database \"connection\" -}}\n{{- else -}}\n{{- $_ := set .Values.conf.tacker.database \"connection\" $connection -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.tacker.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"tacker\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.tacker.DEFAULT \"transport_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.tacker.oslo_messaging_notifications.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"tacker\" \"amqp\" . 
| include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.tacker.oslo_messaging_notifications \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.tacker.keystone_authtoken.www_authenticate_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.tacker.keystone_authtoken \"www_authenticate_uri\" -}}\n{{- end -}}\n{{- if empty .Values.conf.tacker.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.tacker.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.tacker.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.tacker.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.tacker.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.tacker.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.tacker.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.tacker.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.tacker.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.tacker.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.tacker.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.tacker.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.tacker.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.tacker.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.tacker.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.tacker.keystone_authtoken \"username\" .Values.endpoints.identity.auth.tacker.username -}}\n{{- end -}}\n{{- if empty .Values.conf.tacker.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.tacker.keystone_authtoken \"password\" .Values.endpoints.identity.auth.tacker.password -}}\n{{- end -}}\n\n{{- if empty 
.Values.conf.tacker.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.tacker.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.tacker.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.tacker.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if empty .Values.conf.tacker.alarm_auth.url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.tacker.alarm_auth \"url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.tacker.alarm_auth.project_name -}}\n{{- $_ := set .Values.conf.tacker.alarm_auth \"project_name\" .Values.endpoints.identity.auth.tacker.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.tacker.alarm_auth.username -}}\n{{- $_ := set .Values.conf.tacker.alarm_auth \"username\" .Values.endpoints.identity.auth.tacker.username -}}\n{{- end -}}\n{{- if empty .Values.conf.tacker.alarm_auth.password -}}\n{{- $_ := set .Values.conf.tacker.alarm_auth \"password\" .Values.endpoints.identity.auth.tacker.password -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: tacker-etc\n  namespace: openstack\ntype: Opaque\ndata:\n  config.json: {{ toPrettyJson .Values.conf.server | b64enc }}\n  config-conductor.json: {{ toPrettyJson .Values.conf.conductor | b64enc }}\n  tacker.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.tacker | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.paste | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/deployment-conductor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_conductor }}\n{{- $envAll := . }}\n{{- $serviceAccountName := \"tacker-conductor\" }}\n{{ tuple $envAll \"conductor\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: tacker-conductor\n  labels:\n{{ tuple $envAll \"tacker\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"tacker\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  replicas: {{ .Values.pod.replicas.conductor }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"tacker\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      nodeSelector:\n        {{ .Values.labels.conductor.node_selector_key }}: {{ .Values.labels.conductor.node_selector_value }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n        podAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n          - labelSelector:\n              matchExpressions:\n              - key: application\n                operator: In\n                values:\n                
- tacker\n              - key: component\n                operator: In\n                values:\n                - server\n            topologyKey: kubernetes.io/hostname\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.conductor.timeout | default \"30\" }}\n      containers:\n        - name: tacker-conductor\n          image: {{ .Values.images.tags.tacker_conductor }}\n          imagePullPolicy: {{ .Values.images.pull_policy }}\n          command:\n            - /bin/bash\n            - -c\n            - /tmp/tacker-conductor.sh\n          volumeMounts:\n          - name: localtime\n            mountPath: \"/etc/localtime\"\n            readOnly: yes\n          - name: tacker-etc\n            mountPath: \"/etc/tacker/config.json\"\n            readOnly: yes\n            subPath: config-conductor.json\n          - name: tacker-etc\n            mountPath: \"/etc/tacker/api-paste.ini\"\n            readOnly: yes\n            subPath: api-paste.ini\n          - name: tacker-etc\n            mountPath: \"/etc/tacker/tacker.conf\"\n            readOnly: yes\n            subPath: tacker.conf\n          - name: tacker-etc\n            mountPath: \"/etc/tacker/logging.conf\"\n            readOnly: yes\n            subPath: logging.conf\n{{- range $key, $volume := $envAll.Values.storage.volumes }}\n          - name: {{ $key | replace \"_\" \"-\" }}\n            mountPath: {{ $volume.mount_path | quote }}\n            readOnly: false\n{{- end }}\n          - name: tacker-conductor-sh\n            mountPath: /tmp/tacker-conductor.sh\n            subPath: tacker-conductor.sh\n            readOnly: true\n          - name: oslo-lock-path\n            mountPath: {{ .Values.conf.tacker.oslo_concurrency.lock_path }}\n          ports:\n          - name: conductor\n            containerPort: 5672\n      initContainers:\n{{ tuple $envAll \"conductor\" tuple | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      
volumes:\n        - name: localtime\n          hostPath:\n            path: \"/etc/localtime\"\n        - name: tacker-etc\n          secret:\n            defaultMode: 292\n            secretName: tacker-etc\n{{- range $key, $volume := $envAll.Values.storage.volumes }}\n        - name: {{ $key | replace \"_\" \"-\" }}\n          persistentVolumeClaim:\n            claimName: {{ $volume.name }}\n{{- end }}\n        - name: tacker-conductor-sh\n          configMap:\n            name: tacker-bin\n            defaultMode: 0555\n        - name: oslo-lock-path\n          emptyDir: {}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/deployment-server.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_server }}\n{{- $envAll := . }}\n{{- $serviceAccountName := \"tacker-server\" }}\n{{ tuple $envAll \"server\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: tacker-server\n  labels:\n{{ tuple $envAll \"tacker\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  selector:\n    matchLabels:\n{{ tuple $envAll \"tacker\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  replicas: {{ .Values.pod.replicas.server }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"tacker\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      nodeSelector:\n        {{ .Values.labels.server.node_selector_key }}: {{ .Values.labels.server.node_selector_value }}\n      serviceAccountName: tacker-server\n{{ dict \"envAll\" $envAll \"application\" \"server\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.server.timeout | default \"30\" }}\n      containers:\n        - name: tacker-server\n          image: {{ .Values.images.tags.tacker_server }}\n          imagePullPolicy: {{ .Values.images.pull_policy }}\n          
command:\n            - /bin/bash\n            - -c\n            - /tmp/tacker-server.sh\n          volumeMounts:\n          - name: localtime\n            mountPath: \"/etc/localtime\"\n            readOnly: yes\n          - name: tacker-etc\n            mountPath: \"/etc/tacker/config.json\"\n            readOnly: yes\n            subPath: config-server.json\n          - name: tacker-etc\n            mountPath: \"/etc/tacker/api-paste.ini\"\n            readOnly: yes\n            subPath: api-paste.ini\n          - name: tacker-etc\n            mountPath: \"/etc/tacker/tacker.conf\"\n            readOnly: yes\n            subPath: tacker.conf\n          - name: tacker-etc\n            mountPath: \"/etc/tacker/logging.conf\"\n            readOnly: yes\n            subPath: logging.conf\n{{- range $key, $volume := $envAll.Values.storage.volumes }}\n          - name: {{ $key | replace \"_\" \"-\" }}\n            mountPath: {{ $volume.mount_path | quote }}\n            readOnly: false\n{{- end }}\n          - name: tacker-server-sh\n            mountPath: /tmp/tacker-server.sh\n            subPath: tacker-server.sh\n            readOnly: true\n          - name: oslo-lock-path\n            mountPath: {{ .Values.conf.tacker.oslo_concurrency.lock_path }}\n          ports:\n          - name: t-api\n            containerPort: 9890\n      initContainers:\n{{ tuple $envAll \"server\" tuple | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      volumes:\n        - name: localtime\n          hostPath:\n            path: \"/etc/localtime\"\n        - name: tacker-etc\n          secret:\n            defaultMode: 292\n            secretName: tacker-etc\n{{- range $key, $volume := $envAll.Values.storage.volumes }}\n        - name: {{ $key | replace \"_\" \"-\" }}\n          persistentVolumeClaim:\n            claimName: {{ $volume.name }}\n{{- end }}\n        - name: tacker-server-sh\n          configMap:\n            name: tacker-bin\n      
      defaultMode: 0555\n        - name: oslo-lock-path\n          emptyDir: {}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "tacker/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendServiceType\" \"nfv_orchestration\" \"backendPort\" \"t-api\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.nfv_orchestration.api.internal -}}\n{{- if and .Values.manifests.certificates $secretName -}}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.nfv_orchestration.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.job_db_drop }}\n{{- $serviceName := \"tacker\" -}}\n{{- $dbToDrop := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName \"tacker\" ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"database\" \"configDbKey\" \"connection\" -}}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" $serviceName \"dbToDrop\" $dbToDrop -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbToDrop \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.tacker.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n\n{{- $serviceName := \"tacker\" -}}\n{{- $dbToInit := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" (printf \"/etc/%s/%s.conf\" $serviceName $serviceName ) \"logConfigFile\" (printf \"/etc/%s/logging.conf\" $serviceName ) \"configDbSection\" \"database\" \"configDbKey\" \"connection\" -}}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" $serviceName \"dbToInit\" $dbToInit -}}\n\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.tacker.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $serviceName := \"tacker\" }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" $serviceName \"podVolMounts\" .Values.pod.mounts.tacker_db_sync.tacker_db_sync.volumeMounts \"podVols\" .Values.pod.mounts.tacker_db_sync.tacker_db_sync.volumes -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbSyncJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.tacker.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"tacker\" \"serviceTypes\" ( tuple \"nfv-orchestration\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.nfv_orchestration.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.tacker.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"tacker\" \"serviceTypes\" ( tuple \"nfv-orchestration\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.nfv_orchestration.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.tacker.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"tacker\" -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.nfv_orchestration.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.tacker.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"tacker\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $rmqUserJob \"tlsSecret\" .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $rmqUserJob \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.tacker.enabled -}}\n{{- $_ := set $rmqUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/pod-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pod_test }}\n{{- $envAll := . }}\n{{- $dependencies := .Values.dependencies.static.tests }}\n\n{{- $mounts_tacker_tests := .Values.pod.mounts.tacker_tests.tacker_tests }}\n{{- $mounts_tacker_tests_init := .Values.pod.mounts.tacker_tests.init_container }}\n\n{{- $serviceAccountName := print .Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{.Release.Name}}-test\"\n  labels:\n{{ tuple $envAll \"tacker\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"tacker-test\" \"containerNames\" (list \"init\" \"tacker-test\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n  serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.tacker.enabled }}\n{{ tuple $envAll \"tacker\" | include 
\"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  restartPolicy: Never\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_tacker_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: tacker-test\n{{ tuple $envAll \"scripted_test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"tacker_test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n      command:\n        - /tmp/tacker-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: tacker-bin\n          mountPath: /tmp/tacker-test.sh\n          subPath: tacker-test.sh\n          readOnly: true\n{{ if $mounts_tacker_tests.volumeMounts }}{{ toYaml $mounts_tacker_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: tacker-bin\n      configMap:\n        name: tacker-bin\n        defaultMode: 0555\n{{ if $mounts_tacker_tests.volumes }}{{ toYaml $mounts_tacker_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n\n"
  },
  {
    "path": "tacker/templates/pvc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{ define \"tacker.pvc\" }}\n{{- $name := index . 0 }}\n{{- $size := index . 1 }}\n{{- $storageClass := index . 2 }}\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: {{ $name }}\nspec:\n  accessModes:\n    - \"ReadWriteMany\"\n  resources:\n    requests:\n      storage: {{ $size }}\n  storageClassName: {{ $storageClass }}\n{{- end }}\n\n{{- if .Values.manifests.pvc }}\n{{- $storageClass := .Values.storage.storageClass }}\n{{- range .Values.storage.volumes }}\n{{ tuple .name .size $storageClass | include \"tacker.pvc\" }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"tacker\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"tacker\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- $rabbitmqProtocol := \"http\" }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- $rabbitmqProtocol = \"https\" }}\n{{- end }}\n{{- range $key1, $userClass := tuple \"admin\" \"tacker\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass $rabbitmqProtocol $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"nfv_orchestration\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: t-api\n      port: {{ tuple \"nfv_orchestration\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"tacker\" \"server\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/service-conductor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n{{- if .Values.manifests.service_conductor }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"nfv_orchestration\" \"conductor\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: conductor\n      port: {{ tuple \"nfv_orchestration\" \"internal\" \"conductor\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.conductor.node_port.enabled }}\n      nodePort: {{ .Values.network.conductor.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"tacker\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.conductor.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.conductor.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "tacker/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"nfv_orchestration\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "tacker/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for tacker.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nlabels:\n  server:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  conductor:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    tacker_server: quay.io/airshipit/tacker:2025.1-ubuntu_noble\n    tacker_conductor: quay.io/airshipit/tacker:2025.1-ubuntu_noble\n    scripted_test: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    tacker_db_sync: quay.io/airshipit/tacker:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - 
image_repo_sync\n\ndependencies:\n  static:\n    server:\n      jobs:\n        - tacker-db-sync\n        - tacker-ks-user\n        - tacker-ks-endpoints\n        - tacker-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: key_manager\n    conductor:\n      jobs:\n        - tacker-db-sync\n        - tacker-ks-user\n        - tacker-ks-endpoints\n        - tacker-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: key_manager\n    tests:\n      services:\n        - endpoint: internal\n          service: identity\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - tacker-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    ks_endpoints:\n      jobs:\n        - tacker-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n\npod:\n  security_context:\n    server:\n      pod:\n        runAsUser: 0\n        runAsNonRoot: false\n    conductor:\n      pod:\n        runAsUser: 0\n        runAsNonRoot: false\n    test:\n      pod:\n        runAsUser: 0\n        runAsNonRoot: false\n      container:\n        tacker_test:\n          allowPrivilegeEscalation: false\n          readOnlyRootFilesystem: true\n  lifecycle:\n    termination_grace_period:\n      server:\n        timeout: 30\n      conductor:\n        timeout: 30\n  replicas:\n    conductor: 1\n    server: 1\n  tolerations:\n    
tacker:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  resources:\n    enabled: false\n    jobs:\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n  mounts:\n    tacker_db_sync:\n      tacker_db_sync:\n        volumeMounts:\n        volumes:\n    tacker_tests:\n      init_container: null\n      tacker_tests:\n        volumeMounts:\n        volumes:\n\nstorage:\n  storageClass: general\n  volumes:\n    csar_files:\n      name: tacker-csar-files\n      size: 2Gi\n      mount_path: \"/var/lib/tacker/csar_files\"\n    vnfpackages:\n      name: tacker-vnfpackages\n      size: 2Gi\n      mount_path: \"/var/lib/tacker/vnfpackages\"\n    logs:\n      name: tacker-logs\n      size: 2Gi\n      mount_path: 
\"/var/log/openstackhelm/tacker\"\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30900\n  conductor:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30901\n\nsecrets:\n  identity:\n    admin: tacker-keystone-admin\n    tacker: tacker-keystone-user\n  oslo_db:\n    admin: tacker-db-admin\n    tacker: tacker-db-user\n  oslo_messaging:\n    admin: tacker-rabbitmq-admin\n    tacker: tacker-rabbitmq-user\n  oci_image_registry:\n    tacker: tacker-oci-image-registry\n  tls:\n    nfv_orchestration:\n      api:\n        public: tacker-tls-public\n        internal: tacker-tls-internal\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      tacker:\n        username: tacker\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /tacker\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      tacker:\n        role: admin\n        region_name: RegionOne\n        username: tacker\n        password: password\n       
 project_name: service\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n        secret:\n          tls:\n            internal: rabbitmq-tls-direct\n      tacker:\n        username: tacker\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /tacker\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  nfv_orchestration:\n    name: tacker\n    hosts:\n      default: tacker-api\n      conductor: tacker-conductor\n      public: tacker\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      api:\n        default: 9890\n        public: 80\n      conductor:\n        default: 5672\n  key_manager:\n    name: barbican\n    hosts:\n      default: barbican-api\n      public: barbican\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v1\n    scheme:\n      default: http\n    port:\n      api:\n        default: 9311\n        
public: 80\n\nconf:\n  tacker:\n    DEFAULT:\n      log_config_append: /etc/tacker/logging.conf\n      debug: false\n      log_dir: /var/log/openstackhelm/tacker\n      api_workers: 5\n      service_plugins: \"nfvo,vnfm\"\n    nfvo:\n      vim_drivers: openstack\n    openstack_vim:\n      stack_retries: 60\n      stack_retry_wait: 10\n    vim_keys:\n      use_barbican: true\n    tacker:\n      monitor_driver: \"ping,http_ping\"\n      alarm_monitor_driver: ceilometer\n    cors:\n      enabled: true\n      allowed_origin: \"*\"\n      max_age: 3600\n      allow_methods: \"GET,POST,PUT,DELETE,PATCH,OPTIONS\"\n      allow_headers: \"Content-Type,Version,Accept,X-Auth-Token\"\n      expose_headers: \"Content-Type,Accept,Cache-Control,Content-Language,X-Subject-Token\"\n    database:\n      connection_recycle_time: 10\n      max_pool_size: 1\n      max_retries: \"-1\"\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. 
when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    keystone_authtoken:\n      service_type: nfv-orchestration\n      auth_type: password\n      auth_version: v3\n      service_token_roles_required: true\n      cafile: \"\"\n      memcache_security_strategy: ENCRYPT\n    alarm_auth: {}\n    ceilometer:\n      host: tacker-api.openstack.svc.cluster.local\n      port: 9890\n    oslo_messaging_notifications:\n      driver: noop\n    oslo_concurrency:\n      lock_path: /var/lock\n    glance_store:\n      filesystem_store_datadir: /var/lib/tacker/csar_files\n  server:\n    command: \"tacker-server --config-file /etc/tacker/tacker.conf\"\n    config_files:\n      - source: \"/etc/tacker/tacker.conf\"\n        dest: \"/etc/tacker/tacker.conf\"\n        owner: \"tacker\"\n        perm: \"0600\"\n    permissions:\n      - path: \"/var/log/openstackhelm/tacker\"\n        owner: \"tacker:tacker\"\n        recurse: true\n      - path: \"/var/lib/tacker/csar_files\"\n        owner: \"tacker:tacker\"\n  conductor:\n    command: \"tacker-conductor --config-file /etc/tacker/tacker.conf\"\n    config_files:\n      - source: \"/etc/tacker/tacker.conf\"\n        dest: \"/etc/tacker/tacker.conf\"\n        owner: \"tacker\"\n        perm: \"0600\"\n    permissions:\n      - path: \"/var/log/openstackhelm/tacker\"\n        owner: \"tacker:tacker\"\n        recurse: true\n      - path: \"/var/lib/tacker/vnfpackages\"\n        owner: \"tacker:tacker\"\n      - path: \"/var/lib/tacker/csar_files\"\n        owner: \"tacker:tacker\"\n  paste:\n    composite:tacker:\n      use: egg:Paste#urlmap\n      /: tackerversions\n      /v1.0: tackerapi_v1_0\n      /vnfpkgm/v1: vnfpkgmapi_v1\n      /vnflcm: vnflcm_versions\n      /vnflcm/v1: vnflcm_v1\n      /vnflcm/v2: vnflcm_v2\n      /vnffm/v1: vnffm_v1\n      /vnfpm/v2: vnfpm_v2\n      /alert/vnf_instances: prometheus_auto_scaling\n      
/alert: prometheus_fm\n      /pm_event: prometheus_pm\n      /server_notification: server_notification\n    composite:tackerapi_v1_0:\n      use: call:tacker.auth:pipeline_factory\n      noauth: cors request_id catch_errors extensions tackerapiapp_v1_0\n      keystone: cors request_id catch_errors authtoken keystonecontext extensions tackerapiapp_v1_0\n    composite:vnfpkgmapi_v1:\n      use: call:tacker.auth:pipeline_factory\n      noauth: cors request_id catch_errors vnfpkgmapp_v1\n      keystone: cors request_id catch_errors authtoken keystonecontext vnfpkgmapp_v1\n    composite:vnflcm_v1:\n      use: call:tacker.auth:pipeline_factory\n      noauth: cors request_id catch_errors vnflcmaapp_v1\n      keystone: cors request_id catch_errors authtoken keystonecontext vnflcmaapp_v1\n    composite:vnflcm_v2:\n      use: call:tacker.auth:pipeline_factory\n      noauth: cors request_id catch_errors vnflcmaapp_v2\n      keystone: cors request_id catch_errors authtoken keystonecontext vnflcmaapp_v2\n    composite:vnfpm_v2:\n      use: call:tacker.auth:pipeline_factory\n      noauth: cors request_id catch_errors vnfpmaapp_v2\n      keystone: cors request_id catch_errors authtoken keystonecontext vnfpmaapp_v2\n    composite:vnflcm_versions:\n      use: call:tacker.auth:pipeline_factory\n      noauth: cors request_id catch_errors vnflcm_api_versions\n      keystone: cors request_id catch_errors authtoken keystonecontext vnflcm_api_versions\n    composite:vnffm_v1:\n      use: call:tacker.auth:pipeline_factory\n      noauth: cors request_id catch_errors vnffmaapp_v1\n      keystone: cors request_id catch_errors authtoken keystonecontext vnffmaapp_v1\n    filter:cors:\n      paste.filter_factory: oslo_middleware.cors:filter_factory\n      oslo_config_project: tacker\n    filter:request_id:\n      paste.filter_factory: oslo_middleware:RequestId.factory\n    filter:catch_errors:\n      paste.filter_factory: oslo_middleware:CatchErrors.factory\n    filter:keystonecontext:\n      
paste.filter_factory: tacker.auth:TackerKeystoneContext.factory\n    filter:authtoken:\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n    filter:extensions:\n      paste.filter_factory: tacker.api.extensions:extension_middleware_factory\n    app:tackerversions:\n      paste.app_factory: tacker.api.versions:Versions.factory\n    app:tackerapiapp_v1_0:\n      paste.app_factory: tacker.api.v1.router:APIRouter.factory\n    app:vnfpkgmapp_v1:\n      paste.app_factory: tacker.api.vnfpkgm.v1.router:VnfpkgmAPIRouter.factory\n    app:vnflcmaapp_v1:\n      paste.app_factory: tacker.api.vnflcm.v1.router:VnflcmAPIRouter.factory\n    app:vnflcmaapp_v2:\n      paste.app_factory: tacker.sol_refactored.api.router:VnflcmAPIRouterV2.factory\n    app:vnfpmaapp_v2:\n      paste.app_factory: tacker.sol_refactored.api.router:VnfPmAPIRouterV2.factory\n    app:vnflcm_api_versions:\n      paste.app_factory: tacker.sol_refactored.api.router:VnflcmVersions.factory\n    app:vnffmaapp_v1:\n      paste.app_factory: tacker.sol_refactored.api.router:VnffmAPIRouterV1.factory\n    app:prometheus_auto_scaling:\n      paste.app_factory: tacker.sol_refactored.api.router:AutoScalingRouter.factory\n    app:prometheus_fm:\n      paste.app_factory: tacker.sol_refactored.api.router:FmAlertRouter.factory\n    app:prometheus_pm:\n      paste.app_factory: tacker.sol_refactored.api.router:PmEventRouter.factory\n    app:server_notification:\n      paste.app_factory: tacker.sol_refactored.api.router:ServerNotificationRouter.factory\n  logging:\n    loggers:\n      keys:\n        - root\n        - tacker\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_tacker:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: tacker\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      
qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n\ntls:\n  identity: false\n  oslo_messaging: false\n  oslo_db: false\n\nmanifests:\n  certificates: false\n  configmap_etc: true\n  configmap_bin: true\n  deployment_server: true\n  deployment_conductor: true\n  job_db_init: true\n  job_db_drop: false\n  job_db_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_rabbit_init: true\n  pod_test: true\n  pvc: true\n  secret_db: true\n  secret_keystone: true\n  secret_rabbitmq: true\n  service_api: true\n  service_conductor: true\n  ingress_api: true\n  service_ingress_api: true\n\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #   
            - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "tempest/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Tempest\nname: tempest\nversion: 2025.2.0\nhome: https://docs.openstack.org/tempest/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/tempest/OpenStack_Project_tempest_vertical.png\nsources:\n  - https://opendev.org/openstack/tempest\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "tempest/templates/_helpers.tpl",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n# This function helps define blacklist and whitelist files for tempest\n# It ingests a list and creates the appropriate regex files for the whitelist\n# and blacklist\n\n{{- define \"tempest.utils.to_regex_file\" -}}\n{{- range $test_regex := . -}}\n{{ $test_regex }}\n{{ end -}}\n{{- end -}}\n"
  },
  {
    "path": "tempest/templates/bin/_run-tests.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n{{ if .Values.conf.cleanup.enabled }}\ntempest cleanup --init-saved-state\n\nif [ \"true\" == \"{{- .Values.conf.cleanup.force -}}\" ]; then\ntrap \"tempest cleanup; exit\" 1 ERR\nfi\n{{- end }}\n\n{{ .Values.conf.script }}\n\n{{ if .Values.conf.cleanup.enabled }}\ntempest cleanup\n{{- end }}\n"
  },
  {
    "path": "tempest/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: tempest-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  run-tests.sh: |\n{{ tuple \"bin/_run-tests.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "tempest/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.tempest.auth.admin_username -}}\n{{- $_ := set .Values.conf.tempest.auth \"admin_username\" .Values.endpoints.identity.auth.admin.username -}}\n{{- end -}}\n{{- if empty .Values.conf.tempest.auth.admin_password -}}\n{{- $_ := set .Values.conf.tempest.auth \"admin_password\" .Values.endpoints.identity.auth.admin.password -}}\n{{- end -}}\n{{- if empty .Values.conf.tempest.auth.admin_project_name -}}\n{{- $_ := set .Values.conf.tempest.auth \"admin_project_name\" .Values.endpoints.identity.auth.admin.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.tempest.auth.admin_domain_name -}}\n{{- $_ := set .Values.conf.tempest.auth \"admin_domain_name\" .Values.endpoints.identity.auth.admin.user_domain_name -}}\n{{- end -}}\n\n{{- if empty .Values.conf.tempest.identity.uri_v3 -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.tempest.identity \"uri_v3\" -}}\n{{- end -}}\n{{- if empty .Values.conf.tempest.identity.region -}}\n{{- $_ := set .Values.conf.tempest.identity \"region\" .Values.endpoints.identity.auth.admin.region_name -}}\n{{- end -}}\n\n{{- if .Values.conf.tempest.service_available.heat -}}\n{{- if empty .Values.conf.tempest.heat_plugin.username -}}\n{{- $_ := set .Values.conf.tempest.heat_plugin \"username\" .Values.endpoints.identity.auth.tempest.username -}}\n{{- end -}}\n{{- if empty .Values.conf.tempest.heat_plugin.password -}}\n{{- $_ := set .Values.conf.tempest.heat_plugin \"password\" .Values.endpoints.identity.auth.tempest.password -}}\n{{- end -}}\n{{- if empty .Values.conf.tempest.heat_plugin.project_name -}}\n{{- $_ := set .Values.conf.tempest.heat_plugin \"project_name\" .Values.endpoints.identity.auth.tempest.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.tempest.heat_plugin.admin_username -}}\n{{- $_ := set .Values.conf.tempest.heat_plugin \"admin_username\" .Values.endpoints.identity.auth.admin.username -}}\n{{- end -}}\n{{- if empty .Values.conf.tempest.heat_plugin.admin_password -}}\n{{- $_ := set .Values.conf.tempest.heat_plugin \"admin_password\" .Values.endpoints.identity.auth.admin.password -}}\n{{- end -}}\n{{- if empty .Values.conf.tempest.heat_plugin.admin_project_name -}}\n{{- $_ := set .Values.conf.tempest.heat_plugin \"admin_project_name\" .Values.endpoints.identity.auth.admin.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.tempest.heat_plugin.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.tempest.heat_plugin \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.tempest.heat_plugin.region -}}\n{{- $_ := set .Values.conf.tempest.heat_plugin \"region\" .Values.endpoints.identity.auth.admin.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.tempest.heat_plugin.project_domain_name -}}\n{{- $_ := set .Values.conf.tempest.heat_plugin \"project_domain_name\" .Values.endpoints.identity.auth.tempest.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.tempest.heat_plugin.user_domain_name -}}\n{{- $_ := set .Values.conf.tempest.heat_plugin \"user_domain_name\" .Values.endpoints.identity.auth.tempest.user_domain_name -}}\n{{- end -}}\n{{- end -}}\n\n{{- if empty .Values.conf.tempest.dashboard.dashboard_url -}}\n{{- $endpointScheme := tuple \"dashboard\" \"public\" \"web\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\n{{- $endpointHost := tuple \"dashboard\" \"public\" . | include \"helm-toolkit.endpoints.endpoint_host_lookup\" }}\n{{- $endpointPort := tuple \"dashboard\" \"public\" \"web\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $endpointPath := tuple \"dashboard\" \"public\" \"web\" . | include \"helm-toolkit.endpoints.keystone_endpoint_path_lookup\" }}\n{{/*\nWhen CSRF protection is enabled the Referer and Host headers should match.\nCommon browsers don't add default ports like 80 and 443 to the headers.\nUse the same logic here to make sure tests pass when CSRF protection is enabled and\nwe are using default port numbers. 
More info may be found here:\n* https://code.djangoproject.com/ticket/26037\n* https://stackoverflow.com/questions/27533011/django-csrf-error-casused-by-nginx-x-forwarded-host\n*/}}\n    {{- if eq $endpointPort \"80\" \"443\" }}\n        {{- $_ := set .Values.conf.tempest.dashboard \"dashboard_url\" (printf \"%s://%s%s\" $endpointScheme $endpointHost $endpointPath) }}\n    {{- else }}\n        {{- $_ := set .Values.conf.tempest.dashboard \"dashboard_url\" (printf \"%s://%s:%s%s\" $endpointScheme $endpointHost $endpointPort $endpointPath)  }}\n    {{- end }}\n{{- end }}\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: tempest-etc\ntype: Opaque\ndata:\n  tempest.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.tempest | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.tempest_logging | b64enc }}\n{{- if not (empty .Values.conf.blacklist) }}\n  test-blacklist: {{ include \"tempest.utils.to_regex_file\" .Values.conf.blacklist | b64enc }}\n{{- end }}\n{{- if not (empty .Values.conf.whitelist) }}\n  test-whitelist: {{ include \"tempest.utils.to_regex_file\" .Values.conf.whitelist | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "tempest/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "tempest/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"tempest\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "tempest/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"tempest\" -}}\n\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.identity.api.internal -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "tempest/templates/job-run-tests.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_run_tests }}\n\n{{- $envAll := . }}\n\n{{- $serviceAccountName := \"tempest-run-tests\" }}\n{{ tuple $envAll \"run_tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: {{ .Release.Name }}-run-tests\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n    helm.sh/hook: post-install,post-upgrade\nspec:\n  backoffLimit: {{ .Values.jobs.run_tests.backoffLimit }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"tempest\" \"run-tests\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      restartPolicy: {{ .Values.jobs.run_tests.restartPolicy }}\n      nodeSelector:\n        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"run_tests\" list | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n        - name: tempest-run-tests-init\n{{ tuple $envAll \"tempest_run_tests\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.run_tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            runAsUser: 0\n          
command:\n            - chown\n            - -R\n            - \"root:\"\n            - /var/lib/tempest/data\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: tempest-reports\n              mountPath: /var/lib/tempest/data\n      containers:\n        - name: tempest-run-tests\n{{ tuple $envAll \"tempest_run_tests\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.run_tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          command:\n            - /tmp/run-tests.sh\n          env:\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/tempest/certs/ca.crt\"\n{{- end }}\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: tempest-bin\n              mountPath: /tmp/run-tests.sh\n              subPath: run-tests.sh\n              readOnly: true\n            - name: etctempest\n              mountPath: /etc/tempest\n            - name: tempest-etc\n              mountPath: /etc/tempest/tempest.conf\n              subPath: tempest.conf\n              readOnly: true\n            - name: tempest-etc\n              mountPath: /etc/tempest/logging.conf\n              subPath: logging.conf\n              readOnly: true\n{{ if not (empty .Values.conf.blacklist) }}\n            - name: tempest-etc\n              mountPath: /etc/tempest/test-blacklist\n              subPath: test-blacklist\n              readOnly: true\n{{- end }}\n{{ if not (empty .Values.conf.whitelist) }}\n            - name: tempest-etc\n              mountPath: /etc/tempest/test-whitelist\n              subPath: test-whitelist\n              readOnly: true\n{{- 
end }}\n            - name: tempest-reports\n              mountPath: /var/lib/tempest/data\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.identity.api.internal \"path\" \"/etc/tempest/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: etctempest\n          emptyDir: {}\n        - name: tempest-etc\n          secret:\n            secretName: tempest-etc\n            defaultMode: 0444\n        - name: tempest-bin\n          configMap:\n            name: tempest-bin\n            defaultMode: 0555\n        - name: tempest-reports\n        {{- if not .Values.pvc.enabled }}\n          emptyDir: {}\n        {{- else }}\n          persistentVolumeClaim:\n            claimName: {{ .Values.pvc.name }}\n        {{- end }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.identity.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "tempest/templates/pvc-tempest.yaml",
    "content": "# {{/*\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# */}}\n\n{{- if .Values.pvc.enabled }}\n\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: {{ .Values.pvc.name }}\nspec:\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: {{ .Values.pvc.requests.storage }}\n  storageClassName: {{ .Values.pvc.storage_class }}\n{{- end }}\n"
  },
  {
    "path": "tempest/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"tempest\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "tempest/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "tempest/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for tempest.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nlabels:\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nimages:\n  tags:\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    tempest_run_tests: quay.io/airshipit/tempest:latest-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\njobs:\n  run_tests:\n    backoffLimit: 6\n    restartPolicy: OnFailure\n\npod:\n  user:\n    tempest:\n      uid: 1000\n  resources:\n    enabled: false\n    jobs:\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      run_tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - 
tempest-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    ks_user:\n      services:\n      - service: identity\n        endpoint: internal\n    run_tests:\n      jobs:\n      - tempest-ks-user\n      services:\n      - service: identity\n        endpoint: internal\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\nconf:\n  script: |\n    tempest run --config-file /etc/tempest/tempest.conf -w 4 --smoke\n  # The following sections can be used to blacklist and whitelist specific tests.\n  # If either section is not empty, it will be used to create an entry in the\n  # tempest-etc configmap and will be mounted into the tempest-run-tests pod\n  # blacklist:\n  #   - (?:tempest\\.api\\.identity\\.admin\\.v3\\.test_groups\\.GroupsV3TestJSON\\.test_list_groups)\n  #   - (?:tempest\\.api\\.image\\.v2\\.test_images\\.ListSharedImagesTest\\.test_list_images_param_member_status)\n  #   - (?:tempest\\.scenario\\.test_encrypted_cinder_volumes\\.TestEncryptedCinderVolumes\\.test_encrypted_cinder_volumes_cryptsetup)\n  #   - (?:tempest\\.scenario\\.test_encrypted_cinder_volumes\\.TestEncryptedCinderVolumes\\.test_encrypted_cinder_volumes_luks)\n  #   - (?:tempest\\.api\\.network\\.test_networks\\.NetworksIpV6Test\\.test_external_network_visibility)\n  #   - (?:tempest\\.api\\.network\\.test_networks\\.NetworksTest\\.test_external_network_visibility)\n  #   - (?:tempest\\.scenario\\.test_network_v6\\.TestGettingAddress\\.test_dualnet_multi_prefix_slaac)\n  #   - (?:tempest\\.scenario\\.test_network_v6\\.TestGettingAddress\\.test_dualnet_multi_prefix_dhcpv6_stateless)\n  #   - (?:tempest\\.scenario\\.test_network_basic_ops\\.TestNetworkBasicOps\\.test_update_router_admin_state)\n  #   - (?:tempest\\.scenario\\.test_network_basic_ops\\.TestNetworkBasicOps\\.test_router_rescheduling)\n  #   - 
(?:tempest\\.scenario\\.test_network_basic_ops\\.TestNetworkBasicOps\\.test_update_instance_port_admin_state)\n  # whitelist:\n  #   - (?:tempest\\.api\\.identity\\.admin\\.v3\\.test_groups\\.GroupsV3TestJSON\\.test_list_groups)\n  #   - (?:tempest\\.api\\.image\\.v2\\.test_images\\.ListSharedImagesTest\\.test_list_images_param_member_status)\n  #   - (?:tempest\\.scenario\\.test_encrypted_cinder_volumes\\.TestEncryptedCinderVolumes\\.test_encrypted_cinder_volumes_cryptsetup)\n  #   - (?:tempest\\.scenario\\.test_encrypted_cinder_volumes\\.TestEncryptedCinderVolumes\\.test_encrypted_cinder_volumes_luks)\n  #   - (?:tempest\\.api\\.network\\.test_networks\\.NetworksIpV6Test\\.test_external_network_visibility)\n  #   - (?:tempest\\.api\\.network\\.test_networks\\.NetworksTest\\.test_external_network_visibility)\n  #   - (?:tempest\\.scenario\\.test_network_v6\\.TestGettingAddress\\.test_dualnet_multi_prefix_slaac)\n  #   - (?:tempest\\.scenario\\.test_network_v6\\.TestGettingAddress\\.test_dualnet_multi_prefix_dhcpv6_stateless)\n  #   - (?:tempest\\.scenario\\.test_network_basic_ops\\.TestNetworkBasicOps\\.test_update_router_admin_state)\n  #   - (?:tempest\\.scenario\\.test_network_basic_ops\\.TestNetworkBasicOps\\.test_router_rescheduling)\n  #   - (?:tempest\\.scenario\\.test_network_basic_ops\\.TestNetworkBasicOps\\.test_update_instance_port_admin_state)\n  tempest:\n    auth:\n      # admin_username value set by configmap-etc\n      admin_username: null\n      # admin_password value set by configmap-etc\n      admin_password: null\n      # admin_project_name value set by configmap-etc\n      admin_project_name: null\n      # admin_domain_name value set by configmap-etc\n      admin_domain_name: null\n      use_dynamic_credentials: true\n    dashboard: {}\n    heat_plugin:\n      # Username to use for non admin API requests\n      username: null\n      # Non admin API key to use when authenticating.\n      password: null\n      project_name: null\n      # 
Username to use for admin API requests\n      admin_username: null\n      # Admin API key to use when authenticating\n      admin_password: null\n      # Admin project name to use for admin API requests\n      admin_project_name: null\n      auth_version: 3\n      auth_url: null\n      user_domain_name: null\n      project_domain_name: null\n      region: null\n    identity:\n      admin_domain_scope: false\n      auth_version: v3\n      # region value set by configmap-etc\n      region: null\n      # uri_v3 value set by configmap-etc\n      uri_v3: null\n    identity-feature-enabled:\n      api_v3: true\n      # this value should be the same as the keystone chart conf.keystone.identity.domain_specific_drivers_enabled\n      domain_specific_drivers: true\n    image:\n      http_image: \"http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img\"\n      container_formats: bare\n      disk_formats: raw\n    network:\n      project_networks_reachable: false\n      shared_physical_network: true\n    network-feature-enabled:\n      floating_ips: true\n      api_extensions:\n        - default-subnetpools\n        - network-ip-availability\n        - network_availability_zone\n        - auto-allocated-topology\n        - ext-gw-mode\n        - binding\n        - agent\n        - subnet_allocation\n        - l3_agent_scheduler\n        - tag\n        - external-net\n        - flavors\n        - net-mtu\n        - availability_zone\n        - quotas\n        - l3-ha\n        - provider\n        - multi-provider\n        - address-scope\n        - extraroute\n        - subnet-service-types\n        - standard-attr-timestamp\n        - service-type\n        - l3-flavors\n        - port-security\n        - extra_dhcp_opt\n        - standard-attr-revisions\n        - pagination\n        - sorting\n        - security-group\n        - dhcp_agent_scheduler\n        - router_availability_zone\n        - rbac-policies\n        - standard-attr-description\n        - 
router\n        - allowed-address-pairs\n        - project-id\n        - dvr\n    service_available:\n      cinder: true\n      glance: true\n      # The following services are marked as unavailable by default. The default\n      # tempest image used includes a bug resulting in failed network tests that\n      # wasn't fixed in newton. Swift is disabled by default as the swift chart\n      # isn't complete\n      heat: false\n      neutron: false\n      nova: false\n      swift: false\n    validation:\n      connect_method: floating\n    volume:\n      disk_formats: raw\n      backend_names: rbd1\n      storage_protocol: rbd\n      catalog_type: volumev3\n  cleanup:\n    force: false\n    enabled: true\n  tempest_logging:\n    loggers:\n      keys:\n        - root\n        - tempest\n    handlers:\n      keys:\n        - stdout\n        - \"null\"\n    formatters:\n      keys:\n        - tests\n        - default\n    logger_root:\n      level: DEBUG\n      handlers:\n        - 'null'\n    logger_tempest:\n      level: WARN\n      propagate: 0\n      handlers:\n        - stdout\n      qualname: tempest\n    handler_stdout:\n      class: StreamHandler\n      level: WARN\n      args: (sys.stdout,)\n      formatter: tests\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    formatter_tests:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n\n\npvc:\n  enabled: true\n  name: pvc-tempest\n  requests:\n    storage: 2Gi\n  storage_class: general\n\nsecrets:\n  identity:\n    admin: tempest-keystone-admin\n    tempest: tempest-keystone-user\n  oci_image_registry:\n    tempest: tempest-oci-image-registry\n  tls:\n    identity:\n      api:\n        public: keystone-tls-public\n        internal: keystone-tls-api\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: 
docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      tempest:\n        username: tempest\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      tempest:\n        role: admin\n        region_name: RegionOne\n        username: tempest\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  dashboard:\n    name: horizon\n    hosts:\n      default: horizon-int\n      public: horizon\n    host_fqdn_override:\n      default: null\n      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      web:\n        default: 80\ntls:\n  identity: false\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  job_image_repo_sync: true\n  job_ks_user: true\n  job_run_tests: true\n  secret_keystone: true\n  secret_registry: true\n\n# -- Array of extra K8s 
manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "tests/dns-test.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: dns-test\n  namespace: kube-system\nspec:\n  containers:\n  - image: busybox\n    command:\n      - sleep\n      - \"3600\"\n    imagePullPolicy: IfNotPresent\n    name: busybox\n  restartPolicy: Always\n...\n"
  },
  {
    "path": "tests/pvc-test.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  labels:\n    version: v0.1.0\n    test: ceph\n  name: ceph-test-job\nspec:\n  template:\n    spec:\n      restartPolicy: OnFailure\n      containers:\n        - name: test\n          image: docker.io/alpine:latest\n          imagePullPolicy: Always\n          command:\n            - /bin/sh\n            - -ec\n            - |\n              echo \"Ceph PVC Mount Test Passed\"\n          volumeMounts:\n            - name: ceph-mount\n              mountPath: /mnt/ceph\n      volumes:\n        - name: ceph-mount\n          persistentVolumeClaim:\n            claimName: ceph-test\n...\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: ceph-test\nspec:\n  storageClassName: general\n  accessModes: [\"ReadWriteOnce\"]\n  resources:\n    requests:\n      storage: 1Gi\n...\n"
  },
  {
    "path": "tools/changelog.py",
    "content": "import argparse\nimport os.path\nfrom collections import defaultdict\n\nfrom reno import config\nfrom reno import loader\n\n\nBEFORE_2024_2_0_NOTE = \"\"\"Before 2024.2.0 all the OpenStack-Helm charts were versioned independently.\nHere we provide all the release notes for the chart for all versions before 2024.2.0.\n\"\"\"\n\ndef _indent_for_list(text, prefix='  '):\n    lines = text.splitlines()\n    return '\\n'.join([lines[0]] + [\n        prefix + l\n        for l in lines[1:]\n    ])\n\n\ndef chart_reports(loader, config, versions_to_include, title=None, charts=None):\n    reports = defaultdict(list)\n\n    file_contents = {}\n    for version in versions_to_include:\n        for filename, sha in loader[version]:\n            body = loader.parse_note_file(filename, sha)\n            file_contents[filename] = body\n\n    for chart in charts:\n        if title:\n            reports[chart].append(f\"# {title}\")\n            reports[chart].append('')\n\n        for version in versions_to_include:\n            if '-' in version:\n                version_title = config.unreleased_version_title or version\n            else:\n                version_title = version\n\n            reports[chart].append(f\"## {version_title}\")\n            reports[chart].append('')\n\n            if version == \"2024.2.0\":\n                reports[chart].append(BEFORE_2024_2_0_NOTE)\n\n            if config.add_release_date:\n                reports[chart].append('Release Date: ' + loader.get_version_date(version))\n                reports[chart].append('')\n\n            notefiles = loader[version]\n\n            # Prepare not named section\n            # 1. Get all files named <chart>*.yaml\n            #    and get <chart> section from all these files\n            # 2. 
Get all files named common*.yaml and get <chart>\n            #    section from all these files\n            is_content = False\n            for fn, sha in notefiles:\n                if os.path.basename(fn).startswith(chart) or \\\n                        os.path.basename(fn).startswith(\"common\"):\n                    notes = file_contents[fn].get(chart, [])\n                    for n in notes:\n                        is_content = True\n                        reports[chart].append(f\"- {_indent_for_list(n)}\")\n\n            # Add new line after unnamed section if it is not empty\n            if is_content:\n                reports[chart].append(\"\")\n\n            # Prepare named sections\n            # 1. Get all files named <chart>*.yaml\n            #    and get all sections from all these files except <chart>\n            # 2. Get all files named common*.yaml\n            #    and get all sections from all these files except <chart>\n            for section in config.sections:\n                is_content = False\n\n                # Skip chart specific sections\n                if section.name not in [\"features\", \"issues\", \"upgrade\", \"api\", \"security\", \"fixes\"]:\n                    continue\n\n                for fn, sha in notefiles:\n                    if os.path.basename(fn).startswith(chart) or \\\n                            os.path.basename(fn).startswith(\"common\"):\n\n                        notes = file_contents[fn].get(section.name, [])\n\n                        if notes and not is_content:\n                            reports[chart].append(f\"### {section.title}\")\n                            reports[chart].append(\"\")\n\n                        if notes:\n                            is_content = True\n                            for n in notes:\n                                reports[chart].append(f\"- {_indent_for_list(n)}\")\n\n                # Add new line after the section if it is not empty\n                if 
is_content:\n                    reports[chart].append(\"\")\n\n        report = reports[chart]\n        reports[chart] = '\\n'.join(report)\n\n    return reports\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--charts\", nargs=\"+\", default=[], help=\"Charts to generate release notes for\")\n    args = parser.parse_args()\n\n    conf = config.Config(\".\", \"releasenotes\")\n\n    with loader.Loader(conf) as ldr:\n        versions = ldr.versions\n        reports = chart_reports(\n            ldr,\n            conf,\n            versions,\n            title=\"Release notes\",\n            charts=args.charts,\n        )\n\n    for chart in reports:\n        with open(f\"{chart}/CHANGELOG.md\", \"w\") as f:\n            f.write(reports[chart])\n    return\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "tools/chart_version.sh",
    "content": "#!/bin/bash\n\nset -euo pipefail\n\nif [[ $# -lt 2 ]]; then\n    echo \"Usage: $0 <chart_dir> <base_version>\"\n    echo \"  <chart_dir> - The chart directory.\"\n    echo \"  <base_version> - The base version must be <major>.<minor>.<patch>\"\n    echo \"                   For example 2024.2.0\"\n    echo \"                   Will be modified to 2024.2.<patch>+<commit_sha>\"\n    echo \"                   where <patch> is the number of commits since the tag\"\n    echo \"                   equal to <base_version>. If no such tag exists,\"\n    echo \"                   <patch> will be taken from <base_version>.\"\n    exit 1\nfi\n\nCHART_DIR=$1\nBASE_VERSION=$2\nMAJOR=$(echo \"$BASE_VERSION\" | cut -d. -f1)\nMINOR=$(echo \"$BASE_VERSION\" | cut -d. -f2)\nPATCH=$(echo \"$BASE_VERSION\" | cut -d. -f3)\n\nif git show-ref --tags \"$BASE_VERSION\" --quiet; then\n    # if there is tag $BASE_VERSION, then we count the number of commits since the tag\n    PATCH=$(git log --oneline \"${BASE_VERSION}..\" \"$CHART_DIR\" | wc -l | xargs)\nfi\n\nCOMMIT_SHA=$(git rev-parse --short HEAD)\necho \"${MAJOR}.${MINOR}.${PATCH}+${COMMIT_SHA}\"\n"
  },
  {
    "path": "tools/debug_sleep.sh",
    "content": "#!/bin/bash\n\nsleep 86400\n"
  },
  {
    "path": "tools/deployment/baremetal/005-setup-nodes.sh",
"content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: We only want to run ceph and control plane components on the primary node\nfor LABEL in openstack-control-plane ceph-osd ceph-mon ceph-mds ceph-rgw ceph-mgr; do\n  kubectl label nodes ${LABEL}- --all --overwrite\n  PRIMARY_NODE=\"$(kubectl get nodes -l openstack-helm-node-class=primary -o name | awk -F '/' '{ print $NF; exit }')\"\n  kubectl label node \"${PRIMARY_NODE}\" ${LABEL}=enabled\ndone\n\n#NOTE: Build charts\nmake all\n\n#NOTE: Deploy libvirt with vbmc then define domains to use as baremetal nodes\n: \"${OSH_PATH:=\"../openstack-helm\"}\"\nmake -C ${OSH_PATH} libvirt\nhelm install ${OSH_PATH}/libvirt \\\n  --namespace=libvirt \\\n  --name=libvirt \\\n  --set network.backend=null \\\n  --set conf.ceph.enabled=false \\\n  --set images.tags.libvirt=docker.io/openstackhelm/vbmc:centos-0.1\n\n#NOTE: Wait for deploy\nsleep 5 #NOTE(portdirect): work around k8s not immediately assigning pods to nodes\nhelm osh wait-for-pods libvirt\n\n#NOTE: Create domains and start vbmc for ironic to manage as baremetal nodes\nLIBVIRT_PODS=$(kubectl get --namespace libvirt pods \\\n  -l application=libvirt,component=libvirt \\\n  --no-headers -o name | awk -F '/' '{ print $NF }')\nrm -f /tmp/bm-hosts.txt || true\nfor LIBVIRT_POD in ${LIBVIRT_PODS}; do\n  TEMPLATE_MAC_ADDR=\"00:01:DE:AD:BE:EF\"\n  MAC_ADDR=$(printf '00:01:DE:%02X:%02X:%02X\\n' 
$((RANDOM%256)) $((RANDOM%256)) $((RANDOM%256)))\n  LIBVIRT_POD_NODE=$(kubectl get -n libvirt pod \"${LIBVIRT_POD}\" -o json | jq -r '.spec.nodeName')\n  LIBVIRT_NODE_IP=$(kubectl get node \"${LIBVIRT_POD_NODE}\" -o json |  jq -r '.status.addresses[] | select(.type==\"InternalIP\").address')\n  kubectl exec -n libvirt \"${LIBVIRT_POD}\" -- mkdir -p /var/lib/libvirt/images\n  kubectl exec -n libvirt \"${LIBVIRT_POD}\" -- rm -f /var/lib/libvirt/images/vm-1.qcow2 || true\n  kubectl exec -n libvirt \"${LIBVIRT_POD}\" -- qemu-img create -f qcow2 /var/lib/libvirt/images/vm-1.qcow2 5G\n  kubectl exec -n libvirt \"${LIBVIRT_POD}\" -- chown -R qemu: /var/lib/libvirt/images/vm-1.qcow2\n  VM_DEF=\"$(sed \"s|${TEMPLATE_MAC_ADDR}|${MAC_ADDR}|g\" ./tools/deployment/baremetal/fake-baremetal-1.xml | base64 -w0)\"\n  kubectl exec -n libvirt \"${LIBVIRT_POD}\" -- sh -c \"echo ${VM_DEF} | base64 -d > /tmp/fake-baremetal-1.xml\"\n  kubectl exec -n libvirt \"${LIBVIRT_POD}\" -- sh -c \"virsh undefine fake-baremetal-1 || true\"\n  kubectl exec -n libvirt \"${LIBVIRT_POD}\" -- virsh define /tmp/fake-baremetal-1.xml\n  kubectl exec -n libvirt \"${LIBVIRT_POD}\" -- sh -c \"vbmc delete fake-baremetal-1 || true\"\n  kubectl exec -n libvirt \"${LIBVIRT_POD}\" -- vbmc add fake-baremetal-1 --address \"${LIBVIRT_NODE_IP}\"\n  kubectl exec -n libvirt \"${LIBVIRT_POD}\" -- sh -c \"nohup vbmc start fake-baremetal-1 &>/dev/null &\"\n  kubectl exec -n libvirt \"${LIBVIRT_POD}\" -- virsh list --all\n  kubectl exec -n libvirt \"${LIBVIRT_POD}\" -- vbmc show fake-baremetal-1\n  echo \"${LIBVIRT_NODE_IP} ${MAC_ADDR}\" >> /tmp/bm-hosts.txt\ndone\n\n#NOTE: Deploy OvS to connect nodes to the deployment host\n: ${OSH_PATH:=\"../openstack-helm\"}\nmake -C ${OSH_PATH} openvswitch\n\nhelm install ${OSH_PATH}/openvswitch \\\n  --namespace=openstack \\\n  --name=openvswitch\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\n#NOTE: Setup GRE tunnels between deployment node and libvirt 
hosts\nOSH_IRONIC_PXE_DEV=\"${OSH_IRONIC_PXE_DEV:=\"ironic-pxe\"}\"\nOSH_IRONIC_PXE_ADDR=\"${OSH_IRONIC_PXE_ADDR:=\"172.24.6.1/24\"}\"\nMASTER_IP=$(kubectl get node \"$(hostname -f)\" -o json |  jq -r '.status.addresses[] | select(.type==\"InternalIP\").address')\nNODE_IPS=$(kubectl get nodes -o json | jq -r '.items[].status.addresses[] | select(.type==\"InternalIP\").address' | sort -V)\nOVS_VSWITCHD_PODS=$(kubectl get --namespace openstack pods \\\n  -l application=openvswitch,component=openvswitch-vswitchd \\\n  --no-headers -o name  | awk -F '/' '{ print $NF }')\nfor OVS_VSWITCHD_POD in ${OVS_VSWITCHD_PODS}; do\n  kubectl exec --namespace openstack \"${OVS_VSWITCHD_POD}\" \\\n    -- ovs-vsctl add-br \"${OSH_IRONIC_PXE_DEV}\"\n  if [ \"x$(kubectl --namespace openstack get pod \"${OVS_VSWITCHD_POD}\" -o json | jq -r '.spec.nodeName')\" == \"x$(hostname -f)\" ] ; then\n    COUNTER=0\n    for NODE_IP in ${NODE_IPS}; do\n      if ! [ \"x${MASTER_IP}\" == \"x${NODE_IP}\" ]; then\n        kubectl exec --namespace openstack \"${OVS_VSWITCHD_POD}\" \\\n          -- ovs-vsctl add-port \"${OSH_IRONIC_PXE_DEV}\" \"gre${COUNTER}\" \\\n            -- set interface \"gre${COUNTER}\" type=gre options:remote_ip=\"${NODE_IP}\"\n        let COUNTER=COUNTER+1\n      fi\n    done\n    kubectl exec --namespace openstack \"${OVS_VSWITCHD_POD}\" \\\n      -- ip addr add \"${OSH_IRONIC_PXE_ADDR}\" dev \"${OSH_IRONIC_PXE_DEV}\"\n    #NOTE(portdirect): for simplicity assume we are using the default dev\n    # for tunnels, and a MTU overhead of 50\n    MASTER_NODE_DEV=\"$(kubectl exec --namespace openstack \"${OVS_VSWITCHD_POD}\" \\\n      -- ip -4 route list 0/0 | awk '{ print $5; exit }')\"\n    MASTER_NODE_MTU=\"$(kubectl exec --namespace openstack \"${OVS_VSWITCHD_POD}\" \\\n      -- cat \"/sys/class/net/${MASTER_NODE_DEV}/mtu\")\"\n    kubectl exec --namespace openstack \"${OVS_VSWITCHD_POD}\" \\\n      -- ip link set dev ${OSH_IRONIC_PXE_DEV} mtu $((MASTER_NODE_MTU - 50))\n    kubectl 
exec --namespace openstack \"${OVS_VSWITCHD_POD}\" \\\n      -- ip link set \"${OSH_IRONIC_PXE_DEV}\" up\n  else\n    kubectl exec --namespace openstack \"${OVS_VSWITCHD_POD}\" \\\n      -- ovs-vsctl add-port \"${OSH_IRONIC_PXE_DEV}\" gre0 \\\n        -- set interface gre0 type=gre options:remote_ip=\"${MASTER_IP}\"\n  fi\ndone\n\n#NOTE: Set up the ${OSH_IRONIC_PXE_DEV} to forward traffic\nDEFAULT_ROUTE_DEV=\"$(sudo ip -4 route list 0/0 | awk '{ print $5; exit }')\"\nsudo iptables -t nat -A POSTROUTING -o \"${DEFAULT_ROUTE_DEV}\" -j MASQUERADE\nsudo iptables -A FORWARD -i \"${DEFAULT_ROUTE_DEV}\" -o \"${OSH_IRONIC_PXE_DEV}\" -m state --state RELATED,ESTABLISHED -j ACCEPT\nsudo iptables -A FORWARD -i \"${OSH_IRONIC_PXE_DEV}\" -o \"${DEFAULT_ROUTE_DEV}\" -j ACCEPT\n"
  },
  {
    "path": "tools/deployment/baremetal/010-setup-client.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\nsudo -H -E pip3 install \\\n-c${UPPER_CONSTRAINTS_FILE:=https://releases.openstack.org/constraints/upper/master} \\\npython-openstackclient python-heatclient python-ironicclient\n\nsudo -H mkdir -p /etc/openstack\ncat << EOF | sudo -H tee -a /etc/openstack/clouds.yaml\nclouds:\n  openstack_helm:\n    region_name: RegionOne\n    identity_api_version: 3\n    auth:\n      username: 'admin'\n      password: 'password'\n      project_name: 'admin'\n      project_domain_name: 'default'\n      user_domain_name: 'default'\n      auth_url: 'http://keystone.openstack.svc.cluster.local/v3'\nEOF\nsudo -H chown -R $(id -un): /etc/openstack\n\n#NOTE: Build charts\nmake all\n"
  },
  {
    "path": "tools/deployment/baremetal/110-compute-kit.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Lint and package chart\nmake neutron\nmake ironic\nmake nova\n\n#NOTE: Deploy neutron\n#NOTE(portdirect): for simplicity we will assume the default route device\n# should be used for tunnels\nNETWORK_TUNNEL_DEV=\"$(sudo ip -4 route list 0/0 | awk '{ print $5; exit }')\"\nOSH_IRONIC_PXE_DEV=\"ironic-pxe\"\nOSH_IRONIC_PXE_PYSNET=\"ironic\"\ntee /tmp/neutron.yaml << EOF\nnetwork:\n  interface:\n    tunnel: \"${NETWORK_TUNNEL_DEV}\"\nlabels:\n  ovs:\n      node_selector_key: openstack-helm-node-class\n      node_selector_value: primary\n  agent:\n    dhcp:\n      node_selector_key: openstack-helm-node-class\n      node_selector_value: primary\n    l3:\n      node_selector_key: openstack-helm-node-class\n      node_selector_value: primary\n    metadata:\n      node_selector_key: openstack-helm-node-class\n      node_selector_value: primary\npod:\n  replicas:\n    server: 1\nconf:\n  neutron:\n    DEFAULT:\n      l3_ha: False\n      max_l3_agents_per_router: 1\n      l3_ha_network_type: vxlan\n      dhcp_agents_per_network: 1\n  plugins:\n    ml2_conf:\n      ml2_type_flat:\n        flat_networks: public,${OSH_IRONIC_PXE_PYSNET}\n    openvswitch_agent:\n      agent:\n        tunnel_types: vxlan\n      ovs:\n        bridge_mappings: \"external:br-ex,${OSH_IRONIC_PXE_PYSNET}:${OSH_IRONIC_PXE_DEV}\"\nEOF\nhelm upgrade --install neutron ./neutron 
\\\n    --namespace=openstack \\\n    --values=/tmp/neutron.yaml \\\n    --set manifests.network_policy=true \\\n    ${OSH_EXTRA_HELM_ARGS} \\\n    ${OSH_EXTRA_HELM_ARGS_NEUTRON}\n\ntee /tmp/ironic.yaml << EOF\nlabels:\n  node_selector_key: openstack-helm-node-class\n  node_selector_value: primary\nnetwork:\n  pxe:\n    device: \"${OSH_IRONIC_PXE_DEV}\"\n    neutron_provider_network: \"${OSH_IRONIC_PXE_PYSNET}\"\nconf:\n  ironic:\n    DEFAULT:\n      debug: true\n    conductor:\n      automated_clean: \"false\"\n    deploy:\n      shred_final_overwrite_with_zeros: \"false\"\nEOF\nhelm upgrade --install ironic ./ironic \\\n    --namespace=openstack \\\n    --values=/tmp/ironic.yaml \\\n    ${OSH_EXTRA_HELM_ARGS} \\\n    ${OSH_EXTRA_HELM_ARGS_IRONIC}\n\ntee /tmp/nova.yaml << EOF\nlabels:\n  agent:\n    compute_ironic:\n      node_selector_key: openstack-helm-node-class\n      node_selector_value: primary\nconf:\n  nova:\n    DEFAULT:\n      debug: true\n      #force_config_drive: false\n      scheduler_host_manager: ironic_host_manager\n      compute_driver: ironic.IronicDriver\n      firewall_driver: nova.virt.firewall.NoopFirewallDriver\n      #ram_allocation_ratio: 1.0\n      reserved_host_memory_mb: 0\n      scheduler_use_baremetal_filters: true\n      baremetal_scheduler_default_filters: \"RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter\"\n    filter_scheduler:\n      scheduler_tracks_instance_changes: false\n      #scheduler_host_subset_size: 9999\n    scheduler:\n      discover_hosts_in_cells_interval: 120\nmanifests:\n  cron_job_cell_setup: true\n  daemonset_compute: false\n  daemonset_libvirt: false\n  statefulset_compute_ironic: true\n  job_cell_setup: true\nEOF\n# Deploy Nova\nhelm upgrade --install nova ./nova \\\n    --namespace=openstack \\\n    --values=/tmp/nova.yaml \\\n    ${OSH_EXTRA_HELM_ARGS} \\\n    ${OSH_EXTRA_HELM_ARGS_NOVA}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\n#NOTE: Validate Deployment 
info\nexport OS_CLOUD=openstack_helm\nopenstack service list\nsleep 30\nopenstack network agent list\nopenstack baremetal driver list\nopenstack compute service list\n"
  },
  {
    "path": "tools/deployment/baremetal/800-create-baremetal-host-aggregate.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\nexport OS_CLOUD=openstack_helm\nexport OSH_IRONIC_NODE_ARCH=${OSH_IRONIC_NODE_ARCH:=\"x86_64\"}\n\n#NOTE: setup a host aggregate for baremetal nodes to use\nopenstack aggregate create \\\n  --property baremetal=true \\\n  --property cpu_arch=${OSH_IRONIC_NODE_ARCH} \\\n  baremetal-hosts\nIRONIC_COMPUTES=$(openstack compute service list | grep compute | grep $(hostname) | grep -v down | awk '{print $6}')\nfor COMPUTE in $IRONIC_COMPUTES; do\n  openstack aggregate add host baremetal-hosts ${COMPUTE}\ndone\n"
  },
  {
    "path": "tools/deployment/baremetal/810-register-baremetal-nodes.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\nexport OS_CLOUD=openstack_helm\nexport OSH_IRONIC_NODE_DISC=${OSH_IRONIC_NODE_DISC:=\"5\"}\nexport OSH_IRONIC_NODE_RAM=${OSH_IRONIC_NODE_RAM:=\"4096\"}\nexport OSH_IRONIC_NODE_CPU=${OSH_IRONIC_NODE_CPU:=\"2\"}\nexport OSH_IRONIC_NODE_ARCH=${OSH_IRONIC_NODE_ARCH:=\"x86_64\"}\n\n#NOTE: Register the baremetal nodes with ironic\nDEPLOY_VMLINUZ_UUID=$(openstack image show ironic-agent.kernel -f value -c id)\nDEPLOY_INITRD_UUID=$(openstack image show ironic-agent.initramfs -f value -c id)\nMASTER_IP=$(kubectl get node $(hostname -f) -o json |  jq -r '.status.addresses[] | select(.type==\"InternalIP\").address')\nwhile read NODE_DETAIL_RAW; do\n  NODE_DETAIL=($(echo ${NODE_DETAIL_RAW}))\n  NODE_BMC_IP=${NODE_DETAIL[0]}\n  NODE_MAC=${NODE_DETAIL[1]}\n  if [ \"$(kubectl get node -o name | wc -l)\" -eq \"1\" ] || [ \"x${MASTER_IP}\" != \"x${NODE_BMC_IP}\" ]; then\n    BM_NODE=$(openstack baremetal node create \\\n              --driver agent_ipmitool \\\n              --driver-info ipmi_username=admin \\\n              --driver-info ipmi_password=password \\\n              --driver-info ipmi_address=\"${NODE_BMC_IP}\" \\\n              --driver-info ipmi_port=623 \\\n              --driver-info deploy_kernel=${DEPLOY_VMLINUZ_UUID} \\\n              --driver-info deploy_ramdisk=${DEPLOY_INITRD_UUID} \\\n              --property 
local_gb=${OSH_IRONIC_NODE_DISC} \\\n              --property memory_mb=${OSH_IRONIC_NODE_RAM} \\\n              --property cpus=${OSH_IRONIC_NODE_CPU} \\\n              --property cpu_arch=${OSH_IRONIC_NODE_ARCH} \\\n              -f value -c uuid)\n      openstack baremetal node manage \"${BM_NODE}\"\n      openstack baremetal port create --node ${BM_NODE} \"${NODE_MAC}\"\n      openstack baremetal node validate \"${BM_NODE}\"\n      openstack baremetal node provide \"${BM_NODE}\"\n      openstack baremetal node show \"${BM_NODE}\"\n  fi\ndone < /tmp/bm-hosts.txt\n\n#NOTE: Wait for our baremetal nodes to become available for provisioning\nfunction wait_for_ironic_node {\n  # Default wait timeout is 1200 seconds\n  set +x\n  end=$(date +%s)\n  if ! [ -z $2 ]; then\n   end=$((end + $2))\n  else\n   end=$((end + 1200))\n  fi\n  while true; do\n      STATE=$(openstack baremetal node show $1 -f value -c provision_state)\n      [ \"x${STATE}\" == \"xavailable\" ] && break\n      sleep 1\n      now=$(date +%s)\n      [ $now -gt $end ] && echo \"Node did not come up in time\" && openstack baremetal node show $1 && exit -1\n  done\n  set -x\n}\nfor NODE in $(openstack baremetal node list -f value -c UUID); do\n  wait_for_ironic_node $NODE\ndone\nopenstack baremetal node list\n"
  },
  {
    "path": "tools/deployment/baremetal/820-create-baremetal-flavor.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\nexport OS_CLOUD=openstack_helm\nexport OSH_IRONIC_NODE_DISC=${OSH_IRONIC_NODE_DISC:=\"5\"}\nexport OSH_IRONIC_NODE_RAM=${OSH_IRONIC_NODE_RAM:=\"4096\"}\nexport OSH_IRONIC_NODE_CPU=${OSH_IRONIC_NODE_CPU:=\"2\"}\nexport OSH_IRONIC_NODE_ARCH=${OSH_IRONIC_NODE_ARCH:=\"x86_64\"}\n\n#NOTE: Create a flavor associated with our baremetal nodes\nopenstack flavor create \\\n  --disk ${OSH_IRONIC_NODE_DISC} \\\n  --ram ${OSH_IRONIC_NODE_RAM} \\\n  --vcpus ${OSH_IRONIC_NODE_CPU} \\\n  --property cpu_arch=${OSH_IRONIC_NODE_ARCH} \\\n  --property baremetal=true \\\n  baremetal\n"
  },
  {
    "path": "tools/deployment/baremetal/900-use-it.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Validate Deployment info\nexport OS_CLOUD=openstack_helm\n\nexport OSH_VM_KEY_STACK=\"heat-vm-key\"\n# Setup SSH Keypair in Nova\nmkdir -p ${HOME}/.ssh\nopenstack keypair create --private-key ${HOME}/.ssh/osh_key ${OSH_VM_KEY_STACK}\nchmod 600 ${HOME}/.ssh/osh_key\n\n# Deploy heat stack to provision node\nopenstack stack create --wait --timeout 15 \\\n    -t ./tools/deployment/baremetal/heat-basic-bm-deployment.yaml \\\n    heat-basic-bm-deployment\n\nFLOATING_IP=$(openstack stack output show \\\n    heat-basic-bm-deployment \\\n    ip \\\n    -f value -c output_value)\n\n# Wait for the nodes SSH port to come up\nfunction wait_for_ssh_port {\n  # Default wait timeout is 300 seconds\n  set +x\n  end=$(date +%s)\n  if ! 
[ -z $2 ]; then\n   end=$((end + $2))\n  else\n   end=$((end + 300))\n  fi\n  while true; do\n      # Use Nmap as it's the same on Ubuntu and RHEL family distros\n      nmap -Pn -p22 $1 | awk '$1 ~ /22/ {print $2}' | grep -q 'open' && \\\n          break || true\n      sleep 1\n      now=$(date +%s)\n      [ $now -gt $end ] && echo \"Could not connect to $1 port 22 in time\" && exit -1\n  done\n  set -x\n}\nwait_for_ssh_port $FLOATING_IP\n\n# SSH into the VM and check it can reach the outside world\nssh-keyscan \"$FLOATING_IP\" >> ~/.ssh/known_hosts\nBM_GATEWAY=\"$(ssh -i ${HOME}/.ssh/osh_key cirros@${FLOATING_IP} ip -4 route list 0/0 | awk '{ print $3; exit }')\"\nssh -i ${HOME}/.ssh/osh_key cirros@${FLOATING_IP} ping -q -c 1 -W 2 ${BM_GATEWAY}\n\n# Check the VM can reach the metadata server\nssh -i ${HOME}/.ssh/osh_key cirros@${FLOATING_IP} curl --verbose --connect-timeout 5 169.254.169.254\n"
  },
  {
    "path": "tools/deployment/baremetal/fake-baremetal-1.xml",
    "content": "<domain type='qemu'>\n  <name>fake-baremetal-1</name>\n  <memory unit='MB'>4096</memory>\n  <vcpu placement='static'>4</vcpu>\n  <resource>\n    <partition>/machine</partition>\n  </resource>\n  <os>\n    <type arch='x86_64' machine='pc'>hvm</type>\n    <boot dev='network'/>\n    <boot dev='hd'/>\n    <bootmenu enable='no'/>\n    <bios useserial='yes'/>\n  </os>\n  <features>\n    <acpi/>\n    <apic/>\n  </features>\n  <cpu>\n    <topology sockets='1' cores='4' threads='1'/>\n  </cpu>\n  <clock offset='localtime'/>\n  <on_poweroff>destroy</on_poweroff>\n  <on_reboot>restart</on_reboot>\n  <on_crash>restart</on_crash>\n  <devices>\n    <emulator>/usr/libexec/qemu-kvm</emulator>\n     <disk type='file' device='disk'>\n      <driver name='qemu' type='qcow2'/>\n      <source file='/var/lib/libvirt/images/vm-1.qcow2'/>\n      <target dev='vda' bus='virtio'/>\n      <alias name='virtio-disk0'/>\n      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>\n    </disk>\n    <controller type='usb' index='0'>\n      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>\n    </controller>\n    <controller type='ide' index='0'>\n      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>\n    </controller>\n    <controller type='pci' index='0' model='pci-root'/>\n    <interface type='bridge'>\n      <mac address='00:01:DE:AD:BE:EF'/>\n      <source bridge='ironic-pxe'/>\n      <virtualport type='openvswitch'>\n      </virtualport>\n      <model type='virtio'/>\n      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>\n    </interface>\n    <serial type='file'>\n      <source path='/tmp/vm-1.log' append='on'/>\n      <target port='0'/>\n    </serial>\n    <serial type='pty'>\n      <target port='1'/>\n    </serial>\n    <console type='file'>\n      <source path='/tmp/vm-1.log' append='on'/>\n      <target type='serial' port='0'/>\n    </console>\n    <input type='tablet' 
bus='usb'>\n      <address type='usb' bus='0' port='1'/>\n    </input>\n    <input type='mouse' bus='ps2'/>\n    <input type='keyboard' bus='ps2'/>\n    <memballoon model='virtio'>\n      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>\n    </memballoon>\n  </devices>\n</domain>\n"
  },
  {
    "path": "tools/deployment/baremetal/heat-basic-bm-deployment.yaml",
    "content": "---\nheat_template_version: 2016-10-14\n\nparameters:\n  baremetal_net:\n    type: string\n    default: baremetal\n\n  baremetal_subnet:\n    type: string\n    default: baremetal\n\n  image:\n    type: string\n    default: Cirros 0.6.2 64-bit\n\n  flavor:\n    type: string\n    default: baremetal\n\n  ssh_key:\n    type: string\n    default: heat-vm-key\n\nresources:\n  server:\n    type: OS::Nova::Server\n    properties:\n      image:\n        get_param: image\n      flavor:\n        get_param: flavor\n      key_name:\n        get_param: ssh_key\n      networks:\n        - port:\n            get_resource: server_port\n      user_data_format: RAW\n\n  server_port:\n    type: OS::Neutron::Port\n    properties:\n      network:\n        get_param: baremetal_net\n      fixed_ips:\n        - subnet:\n            get_param: baremetal_subnet\n      port_security_enabled: false\n\noutputs:\n  ip:\n    value:\n      get_attr:\n        - server_port\n        - fixed_ips\n        - 0\n        - ip_address\n...\n"
  },
  {
    "path": "tools/deployment/ceph/ceph-adapter-rook.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n\nhelm upgrade --install ceph-adapter-rook ${OSH_HELM_REPO}/ceph-adapter-rook \\\n  --namespace=openstack\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n"
  },
  {
    "path": "tools/deployment/ceph/ceph-ns-activate.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n\n#NOTE: Deploy command\ntee /tmp/ceph-openstack-config.yaml <<EOF\nendpoints:\n  ceph_mon:\n    namespace: ceph\nnetwork:\n  public: 172.17.0.1/16\n  cluster: 172.17.0.1/16\ndeployment:\n  storage_secrets: false\n  ceph: false\n  csi_rbd_provisioner: false\n  client_secrets: true\n  rgw_keystone_user_and_endpoints: false\nbootstrap:\n  enabled: false\nconf:\n  rgw_ks:\n    enabled: false\nEOF\n\n: ${OSH_EXTRA_HELM_ARGS_CEPH_NS_ACTIVATE:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c ceph-provisioners ${FEATURES})\"}\n\nhelm upgrade --install ceph-openstack-config ${OSH_HELM_REPO}/ceph-provisioners \\\n  --namespace=openstack \\\n  --values=/tmp/ceph-openstack-config.yaml \\\n  ${OSH_EXTRA_HELM_ARGS} \\\n  ${OSH_EXTRA_HELM_ARGS_CEPH_NS_ACTIVATE}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\nhelm test ceph-openstack-config --namespace openstack --timeout 600s\n\n#NOTE: Validate Deployment info\nkubectl get -n openstack jobs\nkubectl get -n openstack secrets\nkubectl get -n openstack configmaps\n"
  },
  {
    "path": "tools/deployment/ceph/ceph-radosgw.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_CEPH_RGW:=\"$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c ceph-rgw ${FEATURES})\"}\n\n#NOTE: Deploy command\ntee /tmp/radosgw-osh-infra.yaml <<EOF\nendpoints:\n  ceph_object_store:\n    namespace: osh-infra\n  ceph_mon:\n    namespace: ceph\nnetwork:\n  public: 172.17.0.1/16\n  cluster: 172.17.0.1/16\ndeployment:\n  storage_secrets: false\n  ceph: true\n  csi_rbd_provisioner: false\n  client_secrets: false\n  rgw_keystone_user_and_endpoints: false\nbootstrap:\n  enabled: true\nconf:\n  rgw_ks:\n    enabled: false\n  rgw_s3:\n    enabled: true\npod:\n  replicas:\n    rgw: 1\nmanifests:\n  job_bootstrap: true\nEOF\n\nhelm upgrade --install radosgw-osh-infra ${OSH_HELM_REPO}/ceph-rgw \\\n  --namespace=osh-infra \\\n  --values=/tmp/radosgw-osh-infra.yaml \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_CEPH_RGW}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods osh-infra\n\n# Delete the test pod if it still exists\nkubectl delete pods -l application=ceph,release_group=radosgw-osh-infra,component=rgw-test --namespace=osh-infra --ignore-not-found\n#NOTE: Test Deployment\nhelm test radosgw-osh-infra --namespace osh-infra --timeout 900s\n"
  },
  {
    "path": "tools/deployment/ceph/ceph-rook.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n# Specify the Rook release tag to use for the Rook operator here\nROOK_RELEASE=v1.19.3\n\n: ${CEPH_OSD_DATA_DEVICE:=\"/dev/loop100\"}\n\n#NOTE: Deploy command\n: ${OSH_EXTRA_HELM_ARGS:=\"\"}\n[ -s /tmp/ceph-fs-uuid.txt ] || uuidgen > /tmp/ceph-fs-uuid.txt\nCEPH_FS_ID=\"$(cat /tmp/ceph-fs-uuid.txt)\"\n#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this\n# should be set to 'hammer'\n. 
/etc/os-release\nif [ \"x${ID}\" == \"xcentos\" ] || \\\n   ([ \"x${ID}\" == \"xubuntu\" ] && \\\n   dpkg --compare-versions \"$(uname -r)\" \"lt\" \"4.5\"); then\n  CRUSH_TUNABLES=hammer\nelse\n  CRUSH_TUNABLES=null\nfi\ntee /tmp/rook.yaml <<EOF\nimage:\n  repository: rook/ceph\n  tag: ${ROOK_RELEASE}\n  pullPolicy: IfNotPresent\ncrds:\n  enabled: true\nnodeSelector: {}\ntolerations: []\nunreachableNodeTolerationSeconds: 5\ncurrentNamespaceOnly: false\nannotations: {}\nlogLevel: INFO\nrbacEnable: true\npspEnable: false\npriorityClassName:\nallowLoopDevices: true\ncsi:\n  enableRbdDriver: true\n  enableCephfsDriver: false\n  enableGrpcMetrics: false\n  enableCSIHostNetwork: true\n  enableCephfsSnapshotter: true\n  enableNFSSnapshotter: true\n  enableRBDSnapshotter: true\n  enablePluginSelinuxHostMount: false\n  enableCSIEncryption: false\n  pluginPriorityClassName: system-node-critical\n  provisionerPriorityClassName: system-cluster-critical\n  rbdFSGroupPolicy: \"File\"\n  cephFSFSGroupPolicy: \"File\"\n  nfsFSGroupPolicy: \"File\"\n  enableOMAPGenerator: false\n  cephFSKernelMountOptions:\n  enableMetadata: false\n  provisionerReplicas: 1\n  clusterName: ceph\n  logLevel: 0\n  sidecarLogLevel:\n  rbdPluginUpdateStrategy:\n  rbdPluginUpdateStrategyMaxUnavailable:\n  cephFSPluginUpdateStrategy:\n  nfsPluginUpdateStrategy:\n  grpcTimeoutInSeconds: 150\n  allowUnsupportedVersion: false\n  csiRBDPluginVolume:\n  csiRBDPluginVolumeMount:\n  csiCephFSPluginVolume:\n  csiCephFSPluginVolumeMount:\n  provisionerTolerations:\n  provisionerNodeAffinity: #key1=value1,value2; key2=value3\n  pluginTolerations:\n  pluginNodeAffinity: # key1=value1,value2; key2=value3\n  enableLiveness: false\n  cephfsGrpcMetricsPort:\n  cephfsLivenessMetricsPort:\n  rbdGrpcMetricsPort:\n  csiAddonsPort:\n  forceCephFSKernelClient: true\n  rbdLivenessMetricsPort:\n  kubeletDirPath:\n  cephcsi:\n    image:\n  registrar:\n    image:\n  provisioner:\n    image:\n  snapshotter:\n    image:\n  
attacher:\n    image:\n  resizer:\n    image:\n  imagePullPolicy: IfNotPresent\n  cephfsPodLabels: #\"key1=value1,key2=value2\"\n  nfsPodLabels: #\"key1=value1,key2=value2\"\n  rbdPodLabels: #\"key1=value1,key2=value2\"\n  csiAddons:\n    enabled: false\n    image: \"quay.io/csiaddons/k8s-sidecar:v0.5.0\"\n  nfs:\n    enabled: false\n  topology:\n    enabled: false\n    domainLabels:\n  readAffinity:\n    enabled: false\n    crushLocationLabels:\n  cephFSAttachRequired: true\n  rbdAttachRequired: true\n  nfsAttachRequired: true\nenableDiscoveryDaemon: false\ncephCommandsTimeoutSeconds: \"15\"\nuseOperatorHostNetwork:\ndiscover:\n  toleration:\n  tolerationKey:\n  tolerations:\n  nodeAffinity: # key1=value1,value2; key2=value3\n  podLabels: # \"key1=value1,key2=value2\"\n  resources:\ndisableAdmissionController: true\nhostpathRequiresPrivileged: false\ndisableDeviceHotplug: false\ndiscoverDaemonUdev:\nimagePullSecrets:\nenableOBCWatchOperatorNamespace: true\nadmissionController:\nEOF\n\nhelm repo add rook-release https://charts.rook.io/release\nhelm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph --version ${ROOK_RELEASE} -f /tmp/rook.yaml\nhelm osh wait-for-pods rook-ceph\n\ntee /tmp/ceph.yaml <<EOF\noperatorNamespace: rook-ceph\nclusterName: ceph\nkubeVersion:\nconfigOverride: |\n  [global]\n  mon_allow_pool_delete = true\n  mon_allow_pool_size_one = true\n  osd_pool_default_size = 1\n  osd_pool_default_min_size = 1\n  mon_warn_on_pool_no_redundancy = false\n  auth_allow_insecure_global_id_reclaim = false\ntoolbox:\n  enabled: true\n  tolerations: []\n  affinity: {}\n  resources:\n    limits:\n      cpu: \"100m\"\n      memory: \"64Mi\"\n    requests:\n      cpu: \"100m\"\n      memory: \"64Mi\"\n  priorityClassName:\nmonitoring:\n  enabled: false\n  metricsDisabled: true\n  createPrometheusRules: false\n  rulesNamespaceOverride:\n  prometheusRule:\n    labels: {}\n    annotations: {}\npspEnable: false\ncephClusterSpec:\n  
cephVersion:\n    image: quay.io/ceph/ceph:v20.2.1\n    allowUnsupported: false\n  dataDirHostPath: /var/lib/rook\n  skipUpgradeChecks: false\n  continueUpgradeAfterChecksEvenIfNotHealthy: false\n  waitTimeoutForHealthyOSDInMinutes: 10\n  mon:\n    count: 3\n    allowMultiplePerNode: false\n  mgr:\n    count: 3\n    allowMultiplePerNode: false\n    modules:\n      - name: pg_autoscaler\n        enabled: true\n      - name: dashboard\n        enabled: false\n      - name: nfs\n        enabled: false\n  dashboard:\n    enabled: true\n    ssl: true\n  network:\n    connections:\n      encryption:\n        enabled: false\n      compression:\n        enabled: false\n      requireMsgr2: false\n    provider: host\n  crashCollector:\n    disable: true\n  logCollector:\n    enabled: true\n    periodicity: daily # one of: hourly, daily, weekly, monthly\n    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.\n  cleanupPolicy:\n    confirmation: \"\"\n    sanitizeDisks:\n      method: quick\n      dataSource: zero\n      iteration: 1\n    allowUninstallWithVolumes: false\n  monitoring:\n    enabled: false\n    metricsDisabled: true\n\n  removeOSDsIfOutAndSafeToRemove: false\n  priorityClassNames:\n    mon: system-node-critical\n    osd: system-node-critical\n    mgr: system-cluster-critical\n  storage: # cluster level storage configuration and selection\n    useAllNodes: true\n    useAllDevices: false\n    devices:\n      - name: \"${CEPH_OSD_DATA_DEVICE}\"\n        config:\n          databaseSizeMB: \"5120\"\n          walSizeMB: \"2048\"\n  disruptionManagement:\n    managePodBudgets: true\n    osdMaintenanceTimeout: 30\n    pgHealthCheckTimeout: 0\n  healthCheck:\n    daemonHealth:\n      mon:\n        disabled: false\n        interval: 45s\n      osd:\n        disabled: false\n        interval: 60s\n      status:\n        disabled: false\n        interval: 60s\n    livenessProbe:\n      mon:\n        disabled: false\n      mgr:\n        disabled: false\n     
 osd:\n        disabled: false\ningress:\n  dashboard:\n    {}\ncephBlockPools:\n  - name: rbd\n    namespace: ceph\n    spec:\n      failureDomain: host\n      replicated:\n        size: 1\n    storageClass:\n      enabled: true\n      name: general\n      isDefault: true\n      reclaimPolicy: Delete\n      allowVolumeExpansion: true\n      volumeBindingMode: \"Immediate\"\n      mountOptions: []\n      allowedTopologies: []\n      parameters:\n        imageFormat: \"2\"\n        imageFeatures: layering\n        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner\n        csi.storage.k8s.io/provisioner-secret-namespace: \"{{ .Release.Namespace }}\"\n        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner\n        csi.storage.k8s.io/controller-expand-secret-namespace: \"{{ .Release.Namespace }}\"\n        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node\n        csi.storage.k8s.io/node-stage-secret-namespace: \"{{ .Release.Namespace }}\"\n        csi.storage.k8s.io/fstype: ext4\ncephFileSystems: []\n# Not needed in general for openstack-helm. 
Uncomment if needed.\n# cephFileSystems:\n#   - name: cephfs\n#     namespace: ceph\n#     spec:\n#       metadataPool:\n#         replicated:\n#           size: 1\n#       dataPools:\n#         - failureDomain: host\n#           replicated:\n#             size: 1\n#           name: data\n#       metadataServer:\n#         activeCount: 1\n#         activeStandby: false\n#         priorityClassName: system-cluster-critical\n#     storageClass:\n#       enabled: true\n#       isDefault: false\n#       name: ceph-filesystem\n#       pool: data0\n#       reclaimPolicy: Delete\n#       allowVolumeExpansion: true\n#       volumeBindingMode: \"Immediate\"\n#       mountOptions: []\n#       parameters:\n#         csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner\n#         csi.storage.k8s.io/provisioner-secret-namespace: \"{{ .Release.Namespace }}\"\n#         csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner\n#         csi.storage.k8s.io/controller-expand-secret-namespace: \"{{ .Release.Namespace }}\"\n#         csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node\n#         csi.storage.k8s.io/node-stage-secret-namespace: \"{{ .Release.Namespace }}\"\n#         csi.storage.k8s.io/fstype: ext4\ncephBlockPoolsVolumeSnapshotClass:\n  enabled: false\n  name: general\n  isDefault: false\n  deletionPolicy: Delete\n  annotations: {}\n  labels: {}\n  parameters: {}\ncephObjectStores:\n  - name: default\n    namespace: ceph\n    spec:\n      allowUsersInNamespaces:\n        - \"*\"\n      metadataPool:\n        failureDomain: host\n        replicated:\n          size: 1\n      dataPool:\n        failureDomain: host\n        replicated:\n          size: 1\n      preservePoolsOnDelete: true\n      gateway:\n        port: 8080\n        instances: 1\n        priorityClassName: system-cluster-critical\n    storageClass:\n      enabled: true\n      name: ceph-bucket\n      reclaimPolicy: Delete\n      volumeBindingMode: 
\"Immediate\"\n      parameters:\n        region: us-east-1\nEOF\n\nhelm upgrade --install --create-namespace --namespace ceph rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster --version ${ROOK_RELEASE} -f /tmp/ceph.yaml\n\nhelm osh wait-for-pods rook-ceph\n\nkubectl wait --namespace=ceph --for=condition=ready pod --selector=app=rook-ceph-tools --timeout=600s\n\n# Wait for all monitor pods to be ready\nwait_start_time=$(date +%s)\nwhile [[ $(($(date +%s) - $wait_start_time)) -lt 1800 ]]; do\n    sleep 30\n    MON_PODS=$(kubectl get pods --namespace=ceph --selector=app=rook-ceph-mon --no-headers | awk '{ print $1 }')\n    MON_PODS_NUM=$(echo $MON_PODS | wc -w)\n    MON_PODS_READY=0\n    for MON_POD in $MON_PODS; do\n        if kubectl get pod --namespace=ceph \"$MON_POD\" > /dev/null 2>&1; then\n            kubectl wait --namespace=ceph --for=condition=ready \"pod/$MON_POD\" --timeout=60s && \\\n                { MON_PODS_READY=$(($MON_PODS_READY+1)); } || \\\n                echo \"Pod $MON_POD not ready, skipping...\"\n        else\n            echo \"Pod $MON_POD not found, skipping...\"\n        fi\n    done\n    if [[ ${MON_PODS_READY} == ${MON_PODS_NUM} ]]; then\n        echo \"Monitor pods are ready. Moving on.\"\n        break;\n    fi\ndone\n\necho \"=========== CEPH K8S PODS LIST ============\"\nkubectl get pods -n rook-ceph -o wide\nkubectl get pods -n ceph -o wide\n#NOTE: Wait for deploy\nRGW_POD=$(kubectl get pods \\\n  --namespace=ceph \\\n  --selector=\"app=rook-ceph-rgw\" \\\n  --no-headers | awk '{print $1; exit}')\nwait_start_time=$(date +%s)\nwhile [[ -z \"${RGW_POD}\" && $(($(date +%s) - $wait_start_time)) -lt 1800 ]]\ndo\n  sleep 30\n  date +'%Y-%m-%d %H:%M:%S'\n  TOOLS_POD=$(kubectl get pods \\\n    --namespace=ceph \\\n    --selector=\"app=rook-ceph-tools\" \\\n    --no-headers | grep Running | awk '{ print $1; exit }')\n  if [[ -z \"${TOOLS_POD}\" ]]; then\n    echo \"No running rook-ceph-tools pod found. 
Waiting...\"\n    continue\n  fi\n  echo \"=========== CEPH STATUS ============\"\n  kubectl exec -n ceph ${TOOLS_POD} -- ceph -s || echo \"Could not get cluster status. Might be a temporary network issue.\"\n  echo \"=========== CEPH OSD POOL LIST ============\"\n  kubectl exec -n ceph ${TOOLS_POD} -- ceph osd pool ls || echo \"Could not get list of pools. Might be a temporary network issue.\"\n  echo \"=========== CEPH K8S PODS LIST ============\"\n  kubectl get pods -n ceph -o wide\n  RGW_POD=$(kubectl get pods \\\n    --namespace=ceph \\\n    --selector=\"app=rook-ceph-rgw\" \\\n    --no-headers | awk '{print $1; exit}')\ndone\nhelm osh wait-for-pods ceph\n\n#NOTE: Validate deploy\nTOOLS_POD=$(kubectl get pods \\\n    --namespace=ceph \\\n    --selector=\"app=rook-ceph-tools\" \\\n    --no-headers | grep Running | awk '{ print $1; exit }')\nkubectl exec -n ceph ${TOOLS_POD} -- ceph -s\n"
  },
  {
    "path": "tools/deployment/ceph/ceph.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${CEPH_OSD_DATA_DEVICE:=\"/dev/loop100\"}\n: ${POD_NETWORK_CIDR:=\"10.244.0.0/16\"}\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n\nNUMBER_OF_OSDS=\"$(kubectl get nodes -l ceph-osd=enabled --no-headers | wc -l)\"\n\n#NOTE: Deploy command\n[ -s /tmp/ceph-fs-uuid.txt ] || uuidgen > /tmp/ceph-fs-uuid.txt\nCEPH_FS_ID=\"$(cat /tmp/ceph-fs-uuid.txt)\"\n#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this\n# should be set to 'hammer'\n. 
/etc/os-release\nif [ \"x${ID}\" == \"xcentos\" ] || \\\n   ([ \"x${ID}\" == \"xubuntu\" ] && \\\n   dpkg --compare-versions \"$(uname -r)\" \"lt\" \"4.5\"); then\n  CRUSH_TUNABLES=hammer\nelse\n  CRUSH_TUNABLES=null\nfi\ntee /tmp/ceph.yaml <<EOF\nendpoints:\n  ceph_mon:\n    namespace: ceph\n    port:\n      mon:\n        default: 6789\n  ceph_mgr:\n    namespace: ceph\n    port:\n      mgr:\n        default: 7000\n      metrics:\n        default: 9283\nnetwork:\n  public: \"${POD_NETWORK_CIDR}\"\n  cluster: \"${POD_NETWORK_CIDR}\"\n  port:\n    mon: 6789\n    rgw: 8088\n    mgr: 7000\ndeployment:\n  storage_secrets: true\n  ceph: true\n  csi_rbd_provisioner: true\n  client_secrets: false\n  rgw_keystone_user_and_endpoints: false\nbootstrap:\n  enabled: true\nconf:\n  rgw_ks:\n    enabled: false\n  ceph:\n    global:\n      fsid: ${CEPH_FS_ID}\n      mon_addr: :6789\n      mon_allow_pool_size_one: true\n      osd_pool_default_size: 1\n    osd:\n      osd_crush_chooseleaf_type: 0\n  pool:\n    crush:\n      tunables: ${CRUSH_TUNABLES}\n    target:\n      osd: ${NUMBER_OF_OSDS}\n      final_osd: ${NUMBER_OF_OSDS}\n      pg_per_osd: 100\n    default:\n      crush_rule: same_host\n    spec:\n      # Health metrics pool\n      - name: .mgr\n        application: mgr_devicehealth\n        replication: 1\n        percent_total_data: 5\n      # RBD pool\n      - name: rbd\n        application: rbd\n        replication: 1\n        percent_total_data: 40\n      # CephFS pools\n      - name: cephfs_metadata\n        application: cephfs\n        replication: 1\n        percent_total_data: 5\n      - name: cephfs_data\n        application: cephfs\n        replication: 1\n        percent_total_data: 10\n      # RadosGW pools\n      - name: .rgw.root\n        application: rgw\n        replication: 1\n        percent_total_data: 0.1\n      - name: default.rgw.control\n        application: rgw\n        replication: 1\n        percent_total_data: 0.1\n      - name: 
default.rgw.data.root\n        application: rgw\n        replication: 1\n        percent_total_data: 0.1\n      - name: default.rgw.gc\n        application: rgw\n        replication: 1\n        percent_total_data: 0.1\n      - name: default.rgw.log\n        application: rgw\n        replication: 1\n        percent_total_data: 0.1\n      - name: default.rgw.intent-log\n        application: rgw\n        replication: 1\n        percent_total_data: 0.1\n      - name: default.rgw.meta\n        application: rgw\n        replication: 1\n        percent_total_data: 0.1\n      - name: default.rgw.usage\n        application: rgw\n        replication: 1\n        percent_total_data: 0.1\n      - name: default.rgw.users.keys\n        application: rgw\n        replication: 1\n        percent_total_data: 0.1\n      - name: default.rgw.users.email\n        application: rgw\n        replication: 1\n        percent_total_data: 0.1\n      - name: default.rgw.users.swift\n        application: rgw\n        replication: 1\n        percent_total_data: 0.1\n      - name: default.rgw.users.uid\n        application: rgw\n        replication: 1\n        percent_total_data: 0.1\n      - name: default.rgw.buckets.extra\n        application: rgw\n        replication: 1\n        percent_total_data: 0.1\n      - name: default.rgw.buckets.index\n        application: rgw\n        replication: 1\n        percent_total_data: 3\n      - name: default.rgw.buckets.data\n        application: rgw\n        replication: 1\n        percent_total_data: 29\n  storage:\n    osd:\n      - data:\n          type: bluestore\n          location: ${CEPH_OSD_DATA_DEVICE}\n        # block_db:\n        #   location: ${CEPH_OSD_DB_WAL_DEVICE}\n        #   size: \"5GB\"\n        # block_wal:\n        #   location: ${CEPH_OSD_DB_WAL_DEVICE}\n        #   size: \"2GB\"\n\npod:\n  replicas:\n    mds: 1\n    mgr: 1\n    rgw: 1\njobs:\n  ceph_defragosds:\n    # Execute every 15 minutes for gates\n    cron: \"*/15 * * * *\"\n    
history:\n      # Number of successful job to keep\n      successJob: 1\n      # Number of failed job to keep\n      failJob: 1\n    concurrency:\n      # Skip new job if previous job still active\n      execPolicy: Forbid\n    startingDeadlineSecs: 60\nmanifests:\n  job_bootstrap: false\nEOF\n\nfor CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do\n  helm upgrade --install ${CHART} ${OSH_HELM_REPO}/${CHART} \\\n    --namespace=ceph \\\n    --values=/tmp/ceph.yaml \\\n    ${OSH_EXTRA_HELM_ARGS} \\\n    ${OSH_EXTRA_HELM_ARGS_CEPH_DEPLOY:-$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c ${CHART} ${FEATURES})}\n\n  #NOTE: Wait for deploy\n  helm osh wait-for-pods ceph\n\n  #NOTE: Validate deploy\n  MON_POD=$(kubectl get pods \\\n    --namespace=ceph \\\n    --selector=\"application=ceph\" \\\n    --selector=\"component=mon\" \\\n    --no-headers | awk '{ print $1; exit }')\n  kubectl exec -n ceph ${MON_POD} -- ceph -s\ndone\n\n# Delete the test pod if it still exists\nkubectl delete pods -l application=ceph-osd,release_group=ceph-osd,component=test --namespace=ceph --ignore-not-found\nhelm test ceph-osd --namespace ceph --timeout 900s\n# Delete the test pod if it still exists\nkubectl delete pods -l application=ceph-client,release_group=ceph-client,component=test --namespace=ceph --ignore-not-found\nhelm test ceph-client --namespace ceph --timeout 900s\n"
  },
  {
    "path": "tools/deployment/ceph/ceph_legacy.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${CEPH_OSD_DATA_DEVICE:=\"/dev/loop100\"}\n: ${POD_NETWORK_CIDR:=\"10.244.0.0/16\"}\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n\nNUMBER_OF_OSDS=\"$(kubectl get nodes -l ceph-osd=enabled --no-headers | wc -l)\"\n\n#NOTE: Deploy command\n[ -s /tmp/ceph-fs-uuid.txt ] || uuidgen > /tmp/ceph-fs-uuid.txt\nCEPH_FS_ID=\"$(cat /tmp/ceph-fs-uuid.txt)\"\n#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this\n# should be set to 'hammer'\n. /etc/os-release\nif [ \"x${ID}\" == \"xcentos\" ] || \\\n   ([ \"x${ID}\" == \"xubuntu\" ] && \\\n   dpkg --compare-versions \"$(uname -r)\" \"lt\" \"4.5\"); then\n  CRUSH_TUNABLES=hammer\nelse\n  CRUSH_TUNABLES=null\nfi\n\n# Most of PV fields are immutable and in case of CSI RBD plugin they refer\n# to secrets which were used for RBD provisioner and RBD attacher. 
These fields\n# can not be updated later.\n# So for testing purposes we assume legacy Ceph cluster is deployed with\n# the following secret names for the CSI plugin\n# - rook-csi-rbd-provisioner\n# - rook-csi-rbd-node\n# These exact secret names are used by Rook by default for CSI plugin and\n# after migration PVs will be adopted by the new Rook Ceph cluster.\n#\n# Alternatively if we deploy legacy Ceph cluster with the default values\n# then we could later force Rook to use same CSI secret names as used for\n# legacy cluster. For example pvc-ceph-conf-combined-storageclass secret\n# name is used by default in legacy charts.\n#\n# Same is for CSI provisioner drivername option. For testing we deploy\n# legacy cluster with the drivername set to rook-ceph.rbd.csi.ceph.com\n# while default value is ceph.rbd.csi.ceph.com.\n# This is also for the sake of smooth adoption of PVs.\n\ntee /tmp/ceph.yaml <<EOF\nendpoints:\n  ceph_mon:\n    namespace: ceph\n    port:\n      mon:\n        default: 6789\n  ceph_mgr:\n    namespace: ceph\n    port:\n      mgr:\n        default: 7000\n      metrics:\n        default: 9283\nnetwork:\n  public: \"${POD_NETWORK_CIDR}\"\n  cluster: \"${POD_NETWORK_CIDR}\"\n  port:\n    mon: 6789\n    rgw: 8088\n    mgr: 7000\ndeployment:\n  storage_secrets: true\n  ceph: true\n  csi_rbd_provisioner: true\n  client_secrets: false\n  rgw_keystone_user_and_endpoints: false\nbootstrap:\n  enabled: true\nconf:\n  rgw_ks:\n    enabled: false\n  ceph:\n    global:\n      fsid: ${CEPH_FS_ID}\n      mon_addr: :6789\n      mon_allow_pool_size_one: true\n      osd_pool_default_size: 1\n    osd:\n      osd_crush_chooseleaf_type: 0\n  pool:\n    crush:\n      tunables: ${CRUSH_TUNABLES}\n    target:\n      osd: ${NUMBER_OF_OSDS}\n      final_osd: ${NUMBER_OF_OSDS}\n      pg_per_osd: 100\n    default:\n      crush_rule: same_host\n    spec:\n      # Health metrics pool\n      - name: .mgr\n        application: mgr_devicehealth\n        replication: 1\n        
percent_total_data: 5\n      # RBD pool\n      - name: rbd\n        application: rbd\n        replication: 1\n        percent_total_data: 40\n  storage:\n    osd:\n      - data:\n          type: bluestore\n          location: ${CEPH_OSD_DATA_DEVICE}\n        # block_db:\n        #   location: ${CEPH_OSD_DB_WAL_DEVICE}\n        #   size: \"5GB\"\n        # block_wal:\n        #   location: ${CEPH_OSD_DB_WAL_DEVICE}\n        #   size: \"2GB\"\n\nstorageclass:\n  rbd:\n    parameters:\n      adminSecretName: rook-csi-rbd-provisioner\n      adminSecretNameNode: rook-csi-rbd-node\n  csi_rbd:\n    provisioner: rook-ceph.rbd.csi.ceph.com\n    parameters:\n      clusterID: ceph\n      csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner\n      csi.storage.k8s.io/controller-expand-secret-namespace: ceph\n      csi.storage.k8s.io/fstype: ext4\n      csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node\n      csi.storage.k8s.io/node-stage-secret-namespace: ceph\n      csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner\n      csi.storage.k8s.io/provisioner-secret-namespace: ceph\n      pool: rbd\n      imageFeatures: layering\n      imageFormat: \"2\"\n      adminId: null\n      adminSecretName: rook-csi-rbd-provisioner\n      adminSecretNamespace: ceph\n      userId: null\n      userSecretName: null\n\npod:\n  replicas:\n    mds: 1\n    mgr: 1\n    rgw: 1\n    csi_rbd_provisioner: 1\n\njobs:\n  ceph_defragosds:\n    # Execute every 15 minutes for gates\n    cron: \"*/15 * * * *\"\n    history:\n      # Number of successful job to keep\n      successJob: 1\n      # Number of failed job to keep\n      failJob: 1\n    concurrency:\n      # Skip new job if previous job still active\n      execPolicy: Forbid\n    startingDeadlineSecs: 60\nmanifests:\n  job_bootstrap: false\nEOF\n\nfor CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do\n  helm upgrade --install --create-namespace ${CHART} ${OSH_HELM_REPO}/${CHART} \\\n    
--namespace=ceph \\\n    --values=/tmp/ceph.yaml \\\n    ${OSH_EXTRA_HELM_ARGS} \\\n    ${OSH_EXTRA_HELM_ARGS_CEPH_DEPLOY:-$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c ${CHART} ${FEATURES})}\n\n  #NOTE: Wait for deploy\n  helm osh wait-for-pods ceph\n\n  #NOTE: Validate deploy\n  MON_POD=$(kubectl get pods \\\n    --namespace=ceph \\\n    --selector=\"application=ceph\" \\\n    --selector=\"component=mon\" \\\n    --no-headers | awk '{ print $1; exit }')\n  kubectl exec -n ceph ${MON_POD} -- ceph -s\ndone\n"
  },
  {
    "path": "tools/deployment/ceph/migrate-after.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${NAMESPACE:=openstack}\n\n# Now we have are ready to scale up stateful applications\n# and use same PVs provisioned earlier by legacy Ceph\nkubectl -n ${NAMESPACE} scale statefulset mariadb-server --replicas=1\nkubectl -n ${NAMESPACE} scale statefulset rabbitmq-rabbitmq --replicas=1\n\nsleep 30\nhelm osh wait-for-pods ${NAMESPACE}\n\nkubectl -n ${NAMESPACE} get po\nkubectl -n ${NAMESPACE} get pvc\nkubectl get pv -o yaml\n"
  },
  {
    "path": "tools/deployment/ceph/migrate-before.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${NAMESPACE:=openstack}\n\n# Before migration we have to scale down all the stateful applications\n# so PVs provisioned by Ceph are not attached to any pods\nkubectl -n ${NAMESPACE} scale statefulset mariadb-server --replicas=0\nkubectl -n ${NAMESPACE} scale statefulset rabbitmq-rabbitmq --replicas=0\n\nsleep 30\nhelm osh wait-for-pods ${NAMESPACE}\n\nkubectl -n ${NAMESPACE} get po\nkubectl -n ${NAMESPACE} get pvc\nkubectl get pv -o yaml\n\n# Delete CSI secrets so Rook can deploy them from scratch\nkubectl -n ceph delete secret rook-csi-rbd-provisioner\nkubectl -n ceph delete secret rook-csi-rbd-node\nkubectl -n ceph get secret\n"
  },
  {
    "path": "tools/deployment/ceph/migrate-to-rook-ceph.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -x\n\n# These variables can be set prior to running the script to deploy a specific\n# Ceph release using a specific Rook release. The namespaces for the Rook\n# operator and the Ceph cluster may also be set, along with the YAML definition\n# files that should be used for the Rook operator and Ceph cluster Helm charts.\n# The default values deploy the Rook operator in the rook-ceph namespace and\n# the Ceph cluster in the ceph namespace using rook-operator.yaml and\n# rook-ceph.yaml in the current directory.\nROOK_RELEASE=${ROOK_RELEASE:-1.19.3}\nCEPH_RELEASE=${CEPH_RELEASE:-20.2.1}\nROOK_CEPH_NAMESPACE=${ROOK_CEPH_NAMESPACE:-rook-ceph}\nCEPH_NAMESPACE=${CEPH_NAMESPCE:-ceph}\nROOK_OPERATOR_YAML=${ROOK_OPERATOR_YAML:-/tmp/rook-operator.yaml}\nROOK_CEPH_YAML=${ROOK_CEPH_YAML:-/tmp/rook-ceph.yaml}\n\n# Return a list of unique status strings for pods for a specified application\n# (Pods with the same status will return a single status)\nfunction app_status() {\n  kubectl -n ${CEPH_NAMESPACE} get pods -l app=${1} -o json | jq -r '.items[].status.phase' | sort | uniq\n}\n\n# Function to wait for the initial Rook Ceph deployment to complete\nfunction wait_for_initial_rook_deployment() {\n  set +x\n  echo \"Waiting for initial Rook Ceph cluster deployment...\"\n\n  # Here in the while clause we have to check this\n  # if monitoring is enabled\n  # $(app_status 
rook-ceph-exporter)\" != \"Running\"\n\n  # The initial deployment can't deploy OSDs or RGW\n  while [[ \"$(app_status rook-ceph-mon)\" != \"Running\" || \\\n           \"$(app_status rook-ceph-mgr)\" != \"Running\" || \\\n           \"$(app_status rook-ceph-mds)\" != \"Running\" || \\\n           \"$(app_status rook-ceph-tools)\" != \"Running\" || \\\n           \"$(app_status rook-ceph-osd-prepare)\" != \"Succeeded\" ]]\n  do\n    echo \"Waiting for INITIAL Rook Ceph deployment ...\"\n    kubectl -n ${CEPH_NAMESPACE} get pods\n    sleep 5\n  done\n  set -x\n}\n\n# Function to wait for a full cluster deployment\nfunction wait_for_full_rook_deployment() {\n  set +x\n  echo \"Waiting for full Rook Ceph cluster deployment...\"\n\n  # Here in the while clause we have to check this\n  # if monitoring is enabled\n  # $(app_status rook-ceph-exporter)\" != \"Running\"\n\n  # Look for everything from the initial deployment plus OSDs and RGW\n  while [[ \"$(app_status rook-ceph-mon)\" != \"Running\" || \\\n           \"$(app_status rook-ceph-mgr)\" != \"Running\" || \\\n           \"$(app_status rook-ceph-mds)\" != \"Running\" || \\\n           \"$(app_status rook-ceph-tools)\" != \"Running\" || \\\n           \"$(app_status rook-ceph-osd-prepare)\" != \"Succeeded\" || \\\n           \"$(app_status rook-ceph-osd)\" != \"Running\" || \\\n           \"$(app_status rook-ceph-rgw)\" != \"Running\" ]]\n  do\n    echo \"Waiting for FULL Rook Ceph deployment ...\"\n    kubectl -n ${CEPH_NAMESPACE} get pods\n    sleep 5\n  done\n  set -x\n}\n\n# Function to wait for all pods except rook-ceph-tools to terminate\nfunction wait_for_terminate() {\n  set +x\n  echo \"Waiting for pods to terminate...\"\n\n  while [[ $(kubectl -n ${CEPH_NAMESPACE} get pods | grep -c \"Running\") -gt 1 ]]\n  do\n    sleep 5\n  done\n  set -x\n}\n\n# Function to wait for Ceph to reach a HEALTH_OK state\nfunction wait_for_health_checks() {\n  CEPH_NAMESPACE=${1}\n  CLIENT_POD=${2}\n  set +x\n  echo \"Waiting 
for the Ceph cluster to reach HEALTH_OK with all of the expected resources...\"\n\n  # Time out each loop after ~15 minutes\n  for retry in {0..180}\n  do\n    if [[ $(kubectl -n ${CEPH_NAMESPACE} exec ${CLIENT_POD} -- ceph mon stat -f json | jq -r '.quorum[].name' | wc -l) -eq ${MON_COUNT} &&\n          $(kubectl -n ${CEPH_NAMESPACE} exec ${CLIENT_POD} -- ceph mgr count-metadata name | jq '.unknown') -eq ${MGR_COUNT} &&\n          $(kubectl -n ${CEPH_NAMESPACE} exec ${CLIENT_POD} -- ceph osd stat -f json | jq '.num_up_osds') -eq ${OSD_COUNT} ]]\n    then\n      break\n    fi\n    sleep 5\n  done\n\n  for retry in {0..180}\n  do\n    if [[ \"$(kubectl -n ${CEPH_NAMESPACE} exec ${CLIENT_POD} -- ceph health)\" == \"HEALTH_OK\" ]]\n    then\n      break\n    fi\n    sleep 5\n  done\n\n  kubectl -n ${CEPH_NAMESPACE} exec ${CLIENT_POD} -- ceph status\n  set -x\n}\n\n# Save a legacy ceph-mon host and the existing cluster FSID for later\nexport MON_POD=$(kubectl -n ${CEPH_NAMESPACE} get pods -l component=mon -o json | jq -r '.items[0].metadata.name')\nexport FSID=$(kubectl -n ${CEPH_NAMESPACE} exec ${MON_POD} -- ceph fsid)\nexport OLD_MON_HOST=$(kubectl -n ${CEPH_NAMESPACE} get pods -l component=mon -o json | jq -r '.items[0].spec.nodeName')\nexport OLD_MON_HOST_IP=$(kubectl get nodes -o json | jq -r '.items[] | select(.metadata.name == env.OLD_MON_HOST) | .status.addresses | .[] | select(.type == \"InternalIP\") | .address')\nexport MON_COUNT=$(kubectl -n ${CEPH_NAMESPACE} get pods -l component=mon -o json | jq '.items | length')\nexport MGR_COUNT=$(kubectl -n ${CEPH_NAMESPACE} get pods -l component=mgr -o json | jq '.items | length')\nexport OSD_COUNT=$(kubectl -n ${CEPH_NAMESPACE} get pods -l component=osd -o json | jq '.items | length')\n\n# Rename CephFS pools to match the expected names for Rook CephFS\nFS_SPEC=\"$(kubectl -n ${CEPH_NAMESPACE} exec ${MON_POD} -- ceph fs ls -f json 2> /dev/null)\"\nfor fs in $(echo $FS_SPEC | jq -r '.[].name')\ndo\n  
EXPECTED_METADATA_POOL=\"${fs}-metadata\"\n  METADATA_POOL=$(echo ${FS_SPEC} | jq -r \".[] | select(.name==\\\"${fs}\\\") | .metadata_pool\")\n\n  if [[ \"${METADATA_POOL}\" != \"${EXPECTED_METADATA_POOL}\" ]]\n  then\n    kubectl -n ${CEPH_NAMESPACE} exec ${MON_POD} -- ceph osd pool rename ${METADATA_POOL} ${EXPECTED_METADATA_POOL}\n  fi\n\n  EXPECTED_DATA_POOL=\"${fs}-data\"\n  # NOTE: Only one data pool must have the expected name. Only the first one is\n  # checked here. If it is renamed and another pool with the same name already\n  # exists, the rename will fail and there is no further action needed.\n  DATA_POOL=$(echo ${FS_SPEC} | jq -r \".[] | select(.name==\\\"${fs}\\\") | .data_pools[0]\")\n\n  if [[ \"${DATA_POOL}\" != \"${EXPECTED_DATA_POOL}\" ]]\n  then\n    kubectl -n ${CEPH_NAMESPACE} exec ${MON_POD} -- ceph osd pool rename ${DATA_POOL} ${EXPECTED_DATA_POOL}\n  fi\ndone\n\n# Destroy resources in the Ceph namespace, delete Helm charts, and remove Ceph-related node labels\nfor resource in cj deploy ds service job\ndo\n  kubectl -n ${CEPH_NAMESPACE} get ${resource} -o json | jq -r '.items[].metadata.name' | xargs kubectl -n ${CEPH_NAMESPACE} delete ${resource}\ndone\nhelm -n ${CEPH_NAMESPACE} delete ceph-provisioners\nhelm -n ${CEPH_NAMESPACE} delete ceph-client\nhelm -n ${CEPH_NAMESPACE} delete ceph-mon\nhelm -n ${CEPH_NAMESPACE} delete ceph-osd\nfor node in $(kubectl get nodes -o json | jq -r '.items[].metadata.name' | xargs)\ndo\n  kubectl label node ${node} ceph-mds- ceph-mgr- ceph-mon- ceph-osd- ceph-rgw-\ndone\n\n# Use rook-helm to deploy a new Ceph cluster\nhelm repo add rook-release https://charts.rook.io/release\nhelm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph --version ${ROOK_RELEASE} -f ${ROOK_OPERATOR_YAML}\nhelm upgrade --install --create-namespace --namespace ceph rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster --version ${ROOK_RELEASE} -f 
${ROOK_CEPH_YAML}\nwait_for_initial_rook_deployment\n\n# Retrieve the keyring from the new mon pod and save its host for further work\nexport MON_POD=$(kubectl -n ${CEPH_NAMESPACE} get pods -l app=rook-ceph-mon -o json | jq -r '.items[0].metadata.name')\nkubectl -n ${CEPH_NAMESPACE} exec ${MON_POD} -- cat /etc/ceph/keyring-store/keyring > /tmp/mon-a.keyring\nexport MON_HOST=$(kubectl -n ${CEPH_NAMESPACE} get pods -l app=rook-ceph-mon -o json | jq -r '.items[0].spec.nodeName')\nexport MON_HOST_IP=$(kubectl get nodes -o json | jq -r '.items[] | select(.metadata.name == env.MON_HOST) | .status.addresses | .[] | select(.type == \"InternalIP\") | .address')\n\n# Shut down the Rook operator, delete the rook-ceph deployments, and get the new rook-ceph-mon IP address\nkubectl -n ${ROOK_CEPH_NAMESPACE} scale deploy rook-ceph-operator --replicas=0\nkubectl -n ${CEPH_NAMESPACE} get deploy -o json | jq -r '.items[] | select(.metadata.name != \"rook-ceph-tools\") | .metadata.name' | xargs kubectl -n ${CEPH_NAMESPACE} delete deploy\n#MON_IP=$(kubectl -n ${CEPH_NAMESPACE} get service rook-ceph-mon-a -o json | jq -r '.spec.clusterIP')\nMON_IP=$(kubectl -n ${CEPH_NAMESPACE} get cm rook-ceph-mon-endpoints -o jsonpath='{.data.data}' | sed 's/.=//g' | awk -F: '{print $1}')\nwait_for_terminate\n\n# Download the old mon store and update its key to the new one\nssh ${MON_HOST_IP} \"sudo rm -rf /var/lib/rook/mon-a/data\"\nssh ${OLD_MON_HOST_IP} \"sudo chmod -R a+rX /var/lib/openstack-helm/ceph/mon/mon/ceph-${OLD_MON_HOST}\"\nscp -rp ${OLD_MON_HOST_IP}:/var/lib/openstack-helm/ceph/mon/mon/ceph-${OLD_MON_HOST} /tmp\nmv /tmp/ceph-${OLD_MON_HOST} /tmp/mon-a\ngrep -A2 \"\\[mon\\.\\]\" /tmp/mon-a.keyring > /tmp/mon-a/keyring\n\n# Generate a script to rewrite the monmap in the old mon store\ncat > /tmp/mon-a/fix-monmap.sh <<EOF\n#!/bin/bash\ntouch /etc/ceph/ceph.conf\ncd /var/lib/rook\nceph-mon --extract-monmap monmap --mon-data mon-a/data\nmonmaptool --print monmap | awk '/mon\\./{print \\$3}' 
| cut -d. -f2 | xargs -I{} monmaptool --rm {} monmap\nmonmaptool --addv a [v2:$(echo ${MON_IP}):3300,v1:$(echo ${MON_IP}):6789] monmap\nceph-mon --inject-monmap monmap --mon-data mon-a/data\nrm monmap\nrm mon-a/data/fix-monmap.sh\nEOF\nchmod +x /tmp/mon-a/fix-monmap.sh\n\n# Upload the mon store and script to the new mon host and run the script\nscp -rp /tmp/mon-a ${MON_HOST_IP}:/tmp\nssh ${MON_HOST_IP} \"sudo mv /tmp/mon-a /var/lib/rook/mon-a\"\nssh ${MON_HOST_IP} \"sudo mv /var/lib/rook/mon-a/mon-a /var/lib/rook/mon-a/data\"\nssh ${MON_HOST_IP} \"docker run --rm -v /var/lib/rook:/var/lib/rook quay.io/ceph/ceph:v${CEPH_RELEASE} /var/lib/rook/mon-a/data/fix-monmap.sh\"\n\n# Write the old cluster FSID to the rook-ceph-mon secret, disable authentication, and revive the Rook operator\nkubectl -n ${CEPH_NAMESPACE} get secret rook-ceph-mon -o json | jq --arg fsid \"$(echo -n ${FSID} | base64)\" '.data.fsid = $fsid' | kubectl apply -f -\nkubectl -n ${CEPH_NAMESPACE} get cm rook-config-override -o yaml | \\\nsed '/\\[global\\]/a \\ \\ \\ \\ auth_supported = none' | \\\nsed '/\\[global\\]/a \\ \\ \\ \\ auth_client_required = none' | \\\nsed '/\\[global\\]/a \\ \\ \\ \\ auth_service_required = none' | \\\nsed '/\\[global\\]/a \\ \\ \\ \\ auth_cluster_required = none' | \\\nkubectl apply -f -\nkubectl -n ${ROOK_CEPH_NAMESPACE} scale deploy rook-ceph-operator --replicas=1\nwait_for_full_rook_deployment\n\n# Write the new mon key to the rook-ceph-tools pod and import it for authentication\nTOOLS_POD=$(kubectl -n ${CEPH_NAMESPACE} get pods -l app=rook-ceph-tools -o json | jq -r '.items[0].metadata.name')\nCLIENT_KEY=$(grep -A1 \"\\[client\\.admin\\]\" /tmp/mon-a.keyring | awk '/key/{print $3}')\nkubectl -n ${CEPH_NAMESPACE} exec ${TOOLS_POD} -- bash -c \"echo -e '[client.admin]' > /tmp/keyring\"\nkubectl -n ${CEPH_NAMESPACE} exec ${TOOLS_POD} -- bash -c \"echo -e \\\"        key = ${CLIENT_KEY}\\\" >> /tmp/keyring\"\nkubectl -n ${CEPH_NAMESPACE} exec ${TOOLS_POD} -- bash -c 
\"echo -e '        caps mds = \\\"allow *\\\"' >> /tmp/keyring\"\nkubectl -n ${CEPH_NAMESPACE} exec ${TOOLS_POD} -- bash -c \"echo -e '        caps mon = \\\"allow *\\\"' >> /tmp/keyring\"\nkubectl -n ${CEPH_NAMESPACE} exec ${TOOLS_POD} -- bash -c \"echo -e '        caps osd = \\\"allow *\\\"' >> /tmp/keyring\"\nkubectl -n ${CEPH_NAMESPACE} exec ${TOOLS_POD} -- bash -c \"echo -e '        caps mgr = \\\"allow *\\\"' >> /tmp/keyring\"\nkubectl -n ${CEPH_NAMESPACE} exec ${TOOLS_POD} -- ceph auth import -i /tmp/keyring\nkubectl -n ${CEPH_NAMESPACE} exec ${TOOLS_POD} -- rm /tmp/keyring\n\n# Remove the auth config options to re-enable authentication\nkubectl -n ${CEPH_NAMESPACE} get cm rook-config-override -o yaml | \\\nsed '/    auth_cluster_required = none/d' | \\\nsed '/    auth_service_required = none/d' | \\\nsed '/    auth_client_required = none/d' | \\\nsed '/    auth_supported = none/d' | \\\nkubectl apply -f -\n\n# Restart the Rook operator and Ceph cluster with the new config\nkubectl -n ${ROOK_CEPH_NAMESPACE} scale deploy rook-ceph-operator --replicas=0\nkubectl -n ${CEPH_NAMESPACE} get deploy -o json | jq -r '.items[] | select(.metadata.name != \"rook-ceph-tools\") | .metadata.name' | xargs kubectl -n ${CEPH_NAMESPACE} delete deploy\nwait_for_terminate\nkubectl -n ${ROOK_CEPH_NAMESPACE} scale deploy rook-ceph-operator --replicas=1\nwait_for_full_rook_deployment\n\n# Scale the mon and mgr deployments to original replica counts\nkubectl -n ${CEPH_NAMESPACE} get cephcluster ceph -o json | \\\njq \".spec.mon.count = ${MON_COUNT} | .spec.mgr.count = ${MGR_COUNT}\" | \\\nkubectl apply -f -\nwait_for_health_checks ${CEPH_NAMESPACE} ${TOOLS_POD}\n"
  },
  {
    "path": "tools/deployment/ceph/migrate-values.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\nROOK_RELEASE=v1.19.3\n\n: ${CEPH_OSD_DATA_DEVICE:=\"/dev/loop100\"}\n\ntee /tmp/rook-operator.yaml <<EOF\nimage:\n  repository: rook/ceph\n  tag: ${ROOK_RELEASE}\n  pullPolicy: IfNotPresent\ncrds:\n  enabled: true\nresources:\n  limits:\n    cpu: 200m\n    memory: 256Mi\n  requests:\n    cpu: 100m\n    memory: 128Mi\nnodeSelector: {}\ntolerations: []\nunreachableNodeTolerationSeconds: 5\ncurrentNamespaceOnly: false\nannotations: {}\nlogLevel: INFO\nrbacEnable: true\npspEnable: false\npriorityClassName:\nallowLoopDevices: true\ncsi:\n  enableRbdDriver: true\n  enableCephfsDriver: false\n  enableGrpcMetrics: false\n  enableCSIHostNetwork: true\n  enableCephfsSnapshotter: true\n  enableNFSSnapshotter: true\n  enableRBDSnapshotter: true\n  enablePluginSelinuxHostMount: false\n  enableCSIEncryption: false\n  pluginPriorityClassName: system-node-critical\n  provisionerPriorityClassName: system-cluster-critical\n  rbdFSGroupPolicy: \"File\"\n  cephFSFSGroupPolicy: \"File\"\n  nfsFSGroupPolicy: \"File\"\n  enableOMAPGenerator: false\n  cephFSKernelMountOptions:\n  enableMetadata: false\n  provisionerReplicas: 1\n  clusterName: ceph\n  logLevel: 0\n  sidecarLogLevel:\n  rbdPluginUpdateStrategy:\n  rbdPluginUpdateStrategyMaxUnavailable:\n  cephFSPluginUpdateStrategy:\n  nfsPluginUpdateStrategy:\n  grpcTimeoutInSeconds: 150\n  allowUnsupportedVersion: 
false\n  csiRBDPluginVolume:\n  csiRBDPluginVolumeMount:\n  csiCephFSPluginVolume:\n  csiCephFSPluginVolumeMount:\n  csiRBDProvisionerResource: |\n    - name : csi-provisioner\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 100m\n        limits:\n          memory: 128Mi\n          cpu: 100m\n    - name : csi-resizer\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 100m\n        limits:\n          memory: 128Mi\n          cpu: 100m\n    - name : csi-attacher\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 100m\n        limits:\n          memory: 128Mi\n          cpu: 100m\n    - name : csi-snapshotter\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 100m\n        limits:\n          memory: 128Mi\n          cpu: 100m\n    - name : csi-rbdplugin\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 250m\n        limits:\n          memory: 128Mi\n          cpu: 250m\n    - name : csi-omap-generator\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 250m\n        limits:\n          memory: 128Mi\n          cpu: 250m\n    - name : liveness-prometheus\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 50m\n        limits:\n          memory: 128Mi\n          cpu: 100m\n  csiRBDPluginResource: |\n    - name : driver-registrar\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 50m\n        limits:\n          memory: 128Mi\n          cpu: 100m\n    - name : csi-rbdplugin\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 250m\n        limits:\n          memory: 128Mi\n          cpu: 250m\n    - name : liveness-prometheus\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 50m\n        limits:\n          memory: 256Mi\n          cpu: 100m\n  csiCephFSProvisionerResource: |\n    - name : csi-provisioner\n      
resource:\n        requests:\n          memory: 128Mi\n          cpu: 100m\n        limits:\n          memory: 128Mi\n          cpu: 200m\n    - name : csi-resizer\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 100m\n        limits:\n          memory: 128Mi\n          cpu: 200m\n    - name : csi-attacher\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 100m\n        limits:\n          memory: 128Mi\n          cpu: 200m\n    - name : csi-snapshotter\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 100m\n        limits:\n          memory: 128Mi\n          cpu: 200m\n    - name : csi-cephfsplugin\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 250m\n        limits:\n          memory: 128Mi\n          cpu: 250m\n    - name : liveness-prometheus\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 50m\n        limits:\n          memory: 128Mi\n          cpu: 100m\n  csiCephFSPluginResource: |\n    - name : driver-registrar\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 50m\n        limits:\n          memory: 128Mi\n          cpu: 100m\n    - name : csi-cephfsplugin\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 250m\n        limits:\n          memory: 128Mi\n          cpu: 250m\n    - name : liveness-prometheus\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 50m\n        limits:\n          memory: 128Mi\n          cpu: 100m\n  csiNFSProvisionerResource: |\n    - name : csi-provisioner\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 100m\n        limits:\n          memory: 128Mi\n          cpu: 200m\n    - name : csi-nfsplugin\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 250m\n        limits:\n          memory: 128Mi\n          cpu: 250m\n    - name : csi-attacher\n      resource:\n      
  requests:\n          memory: 128Mi\n          cpu: 250m\n        limits:\n          memory: 128Mi\n          cpu: 250m\n  csiNFSPluginResource: |\n    - name : driver-registrar\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 50m\n        limits:\n          memory: 128Mi\n          cpu: 100m\n    - name : csi-nfsplugin\n      resource:\n        requests:\n          memory: 128Mi\n          cpu: 250m\n        limits:\n          memory: 128Mi\n          cpu: 250m\n  provisionerTolerations:\n  provisionerNodeAffinity: #key1=value1,value2; key2=value3\n  pluginTolerations:\n  pluginNodeAffinity: # key1=value1,value2; key2=value3\n  enableLiveness: false\n  cephfsGrpcMetricsPort:\n  cephfsLivenessMetricsPort:\n  rbdGrpcMetricsPort:\n  csiAddonsPort:\n  forceCephFSKernelClient: true\n  rbdLivenessMetricsPort:\n  kubeletDirPath:\n  cephcsi:\n    image:\n  registrar:\n    image:\n  provisioner:\n    image:\n  snapshotter:\n    image:\n  attacher:\n    image:\n  resizer:\n    image:\n  imagePullPolicy: IfNotPresent\n  cephfsPodLabels: #\"key1=value1,key2=value2\"\n  nfsPodLabels: #\"key1=value1,key2=value2\"\n  rbdPodLabels: #\"key1=value1,key2=value2\"\n  csiAddons:\n    enabled: false\n    image: \"quay.io/csiaddons/k8s-sidecar:v0.5.0\"\n  nfs:\n    enabled: false\n  topology:\n    enabled: false\n    domainLabels:\n  readAffinity:\n    enabled: false\n    crushLocationLabels:\n  cephFSAttachRequired: true\n  rbdAttachRequired: true\n  nfsAttachRequired: true\nenableDiscoveryDaemon: false\ncephCommandsTimeoutSeconds: \"15\"\nuseOperatorHostNetwork:\ndiscover:\n  toleration:\n  tolerationKey:\n  tolerations:\n  nodeAffinity: # key1=value1,value2; key2=value3\n  podLabels: # \"key1=value1,key2=value2\"\n  resources:\ndisableAdmissionController: true\nhostpathRequiresPrivileged: false\ndisableDeviceHotplug: false\ndiscoverDaemonUdev:\nimagePullSecrets:\nenableOBCWatchOperatorNamespace: true\nadmissionController:\nEOF\n\ntee /tmp/rook-ceph.yaml 
<<EOF\noperatorNamespace: rook-ceph\nclusterName: ceph\nkubeVersion:\nconfigOverride: |\n  [global]\n  mon_allow_pool_delete = true\n  mon_allow_pool_size_one = true\n  osd_pool_default_size = 1\n  osd_pool_default_min_size = 1\n  mon_warn_on_pool_no_redundancy = false\n  auth_allow_insecure_global_id_reclaim = false\ntoolbox:\n  enabled: true\n  tolerations: []\n  affinity: {}\n  resources:\n    limits:\n      cpu: \"100m\"\n      memory: \"64Mi\"\n    requests:\n      cpu: \"100m\"\n      memory: \"64Mi\"\n  priorityClassName:\nmonitoring:\n  enabled: false\n  metricsDisabled: true\n  createPrometheusRules: false\n  rulesNamespaceOverride:\n  prometheusRule:\n    labels: {}\n    annotations: {}\npspEnable: false\ncephClusterSpec:\n  cephVersion:\n    image: quay.io/ceph/ceph:v20.2.1\n    allowUnsupported: false\n  dataDirHostPath: /var/lib/rook\n  skipUpgradeChecks: false\n  continueUpgradeAfterChecksEvenIfNotHealthy: false\n  waitTimeoutForHealthyOSDInMinutes: 10\n  mon:\n    count: 1\n    allowMultiplePerNode: false\n  mgr:\n    count: 1\n    allowMultiplePerNode: false\n    modules:\n      - name: pg_autoscaler\n        enabled: true\n      - name: rook\n        enabled: true\n      - name: nfs\n        enabled: false\n  dashboard:\n    enabled: true\n    ssl: false\n  network:\n    connections:\n      encryption:\n        enabled: false\n      compression:\n        enabled: false\n      requireMsgr2: false\n    provider: host\n  crashCollector:\n    disable: true\n  logCollector:\n    enabled: true\n    periodicity: daily # one of: hourly, daily, weekly, monthly\n    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. 
Must be at least 1M.\n  cleanupPolicy:\n    confirmation: \"\"\n    sanitizeDisks:\n      method: quick\n      dataSource: zero\n      iteration: 1\n    allowUninstallWithVolumes: false\n  monitoring:\n    enabled: false\n    metricsDisabled: true\n  resources:\n    mgr:\n      limits:\n        cpu: \"250m\"\n        memory: \"512Mi\"\n      requests:\n        cpu: \"250m\"\n        memory: \"5Mi\"\n    mon:\n      limits:\n        cpu: \"250m\"\n        memory: \"256Mi\"\n      requests:\n        cpu: \"250m\"\n        memory: \"128Mi\"\n    osd:\n      limits:\n        cpu: \"500m\"\n        memory: \"2Gi\"\n      requests:\n        cpu: \"500m\"\n        memory: \"1Gi\"\n    prepareosd:\n      requests:\n        cpu: \"500m\"\n        memory: \"50Mi\"\n    mgr-sidecar:\n      limits:\n        cpu: \"200m\"\n        memory: \"50Mi\"\n      requests:\n        cpu: \"100m\"\n        memory: \"5Mi\"\n    crashcollector:\n      limits:\n        cpu: \"200m\"\n        memory: \"60Mi\"\n      requests:\n        cpu: \"100m\"\n        memory: \"60Mi\"\n    logcollector:\n      limits:\n        cpu: \"200m\"\n        memory: \"1Gi\"\n      requests:\n        cpu: \"100m\"\n        memory: \"100Mi\"\n    cleanup:\n      limits:\n        cpu: \"250m\"\n        memory: \"1Gi\"\n      requests:\n        cpu: \"250m\"\n        memory: \"100Mi\"\n  removeOSDsIfOutAndSafeToRemove: false\n  priorityClassNames:\n    mon: system-node-critical\n    osd: system-node-critical\n    mgr: system-cluster-critical\n  storage: # cluster level storage configuration and selection\n    useAllNodes: true\n    useAllDevices: false\n    devices:\n      - name: \"${CEPH_OSD_DATA_DEVICE}\"\n        config:\n          databaseSizeMB: \"5120\"\n          walSizeMB: \"2048\"\n  disruptionManagement:\n    managePodBudgets: true\n    osdMaintenanceTimeout: 30\n    pgHealthCheckTimeout: 0\n  healthCheck:\n    daemonHealth:\n      mon:\n        disabled: false\n        interval: 45s\n      osd:\n        
disabled: false\n        interval: 60s\n      status:\n        disabled: false\n        interval: 60s\n    livenessProbe:\n      mon:\n        disabled: false\n      mgr:\n        disabled: false\n      osd:\n        disabled: false\ningress:\n  dashboard:\n    annotations:\n      nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2\n    host:\n      name: dashboard.example.com\n      path: \"/ceph-dashboard(/|$)(.*)\"\n    ingressClassName: nginx\ncephBlockPools:\n  - name: rbd\n    namespace: ceph\n    spec:\n      failureDomain: host\n      replicated:\n        size: 1\n    storageClass:\n      enabled: true\n      name: general\n      isDefault: true\n      reclaimPolicy: Delete\n      allowVolumeExpansion: true\n      volumeBindingMode: \"Immediate\"\n      mountOptions: []\n      allowedTopologies: []\n      parameters:\n        imageFormat: \"2\"\n        imageFeatures: layering\n        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner\n        csi.storage.k8s.io/provisioner-secret-namespace: \"{{ .Release.Namespace }}\"\n        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner\n        csi.storage.k8s.io/controller-expand-secret-namespace: \"{{ .Release.Namespace }}\"\n        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node\n        csi.storage.k8s.io/node-stage-secret-namespace: \"{{ .Release.Namespace }}\"\n        csi.storage.k8s.io/fstype: ext4\ncephFileSystems: []\n# Not needed in general for openstack-helm. 
Uncomment if needed.\n# cephFileSystems:\n#   - name: cephfs\n#     namespace: ceph\n#     spec:\n#       metadataPool:\n#         replicated:\n#           size: 1\n#       dataPools:\n#         - failureDomain: host\n#           replicated:\n#             size: 1\n#           name: data\n#       metadataServer:\n#         activeCount: 1\n#         activeStandby: false\n#         resources:\n#           limits:\n#             cpu: \"250m\"\n#             memory: \"50Mi\"\n#           requests:\n#             cpu: \"250m\"\n#             memory: \"10Mi\"\n#         priorityClassName: system-cluster-critical\n#     storageClass:\n#       enabled: true\n#       isDefault: false\n#       name: ceph-filesystem\n#       pool: data0\n#       reclaimPolicy: Delete\n#       allowVolumeExpansion: true\n#       volumeBindingMode: \"Immediate\"\n#       mountOptions: []\n#       parameters:\n#         csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner\n#         csi.storage.k8s.io/provisioner-secret-namespace: \"{{ .Release.Namespace }}\"\n#         csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner\n#         csi.storage.k8s.io/controller-expand-secret-namespace: \"{{ .Release.Namespace }}\"\n#         csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node\n#         csi.storage.k8s.io/node-stage-secret-namespace: \"{{ .Release.Namespace }}\"\n#         csi.storage.k8s.io/fstype: ext4\ncephBlockPoolsVolumeSnapshotClass:\n  enabled: false\n  name: general\n  isDefault: false\n  deletionPolicy: Delete\n  annotations: {}\n  labels: {}\n  parameters: {}\ncephObjectStores:\n  - name: default\n    namespace: ceph\n    spec:\n      allowUsersInNamespaces:\n        - \"*\"\n      metadataPool:\n        failureDomain: host\n        replicated:\n          size: 1\n      dataPool:\n        failureDomain: host\n        replicated:\n          size: 1\n      preservePoolsOnDelete: true\n      gateway:\n        port: 8080\n        
resources:\n          limits:\n            cpu: \"500m\"\n            memory: \"128Mi\"\n          requests:\n            cpu: \"500m\"\n            memory: \"32Mi\"\n        instances: 1\n        priorityClassName: system-cluster-critical\n    storageClass:\n      enabled: true\n      name: ceph-bucket\n      reclaimPolicy: Delete\n      volumeBindingMode: \"Immediate\"\n      parameters:\n        region: us-east-1\nEOF\n"
  },
  {
    "path": "tools/deployment/common/cert-manager.sh",
    "content": "#!/bin/bash\n\nset -eux\n\n: ${CERT_MANAGER_VERSION:=\"v1.16.1\"}\n\ncert_path=\"/etc/openstack-helm\"\nca_cert_root=\"$cert_path/certs/ca\"\n\nfunction check_cert {\n  # $1: the certificate file, e.g. ca.pem\n  # $2: the key file, e.g. ca-key.pem\n  local cert=\"$(openssl x509 -noout -modulus -in $1)\"\n  local key=\"$(openssl rsa -noout -modulus -in $2)\"\n  if ! [ \"$cert\" = \"$key\" ]; then\n    echo \"Failure: tls private key does not match cert\"\n    exit 1\n  else\n    echo \"Pass: $cert is valid with $key\"\n  fi\n}\n\n# Download cfssl and cfssljson if they are not available on the system\nif type cfssl && type cfssljson; then\n  echo \"cfssl and cfssljson found - skipping installation\"\nelse\n  echo \"installing cfssl and cfssljson\"\n  temp_bin=$(mktemp --directory)\n  cd $temp_bin\n  CFSSLURL=https://pkg.cfssl.org/R1.2\n  curl --retry 3 --retry-connrefused -sSL -o cfssl $CFSSLURL/cfssl_linux-amd64\n  curl --retry 3 --retry-connrefused -sSL -o cfssljson $CFSSLURL/cfssljson_linux-amd64\n  chmod +x {cfssl,cfssljson}\n  export PATH=$PATH:$temp_bin\nfi\n\n# Sets up a directory for the certs\nsudo rm -rf $cert_path\nsudo mkdir -p $ca_cert_root\nsudo chmod -R go+w $cert_path\n\ncd $ca_cert_root\n\ncat > ca-csr.json <<EOF\n{\n  \"CN\": \"ACME Company\",\n  \"key\": {\n    \"algo\": \"rsa\",\n    \"size\": 4096\n  },\n  \"names\": [\n    {\n      \"C\": \"US\",\n      \"L\": \"SomeState\",\n      \"ST\": \"SomeCity\",\n      \"O\": \"SomeOrg\",\n      \"OU\": \"SomeUnit\"\n    }\n  ]\n}\nEOF\n\ncfssl gencert -initca ca-csr.json | cfssljson -bare ca -\ncheck_cert ca.pem ca-key.pem\n\n\ncat > cfssl.json <<EOF\n{\n  \"signing\": {\n    \"default\": {\n      \"expiry\": \"8760h\"\n    },\n    \"profiles\": {\n      \"intermediate_ca\": {\n        \"expiry\": \"8760h\",\n        \"usages\": [\n          \"signing\",\n          \"digital signature\",\n          \"cert sign\",\n          \"crl sign\",\n          \"key encipherment\",\n          
\"server auth\",\n          \"client auth\"\n        ],\n        \"ca_constraint\": {\n          \"is_ca\": true\n        }\n      }\n    }\n  }\n}\nEOF\n\ncat > intermediate-ca.json <<EOF\n{\n  \"CN\": \"OpenStack Helm CA\",\n  \"key\": {\n    \"algo\": \"rsa\",\n    \"size\": 4096\n  },\n  \"names\": [\n    {\n      \"C\": \"GB\",\n      \"L\": \"Space\",\n      \"ST\": \"Earth\",\n      \"O\": \"OSH\",\n      \"OU\": \"OSH\"\n    }\n  ],\n  \"ca\": {\n    \"expiry\": \"8760h\"\n  }\n}\nEOF\n\ncfssl gencert -initca intermediate-ca.json | cfssljson -bare intermediate-ca -\ncfssl sign -ca ca.pem -ca-key ca-key.pem -config cfssl.json -profile intermediate_ca \\\n  intermediate-ca.csr | cfssljson -bare intermediate-ca\ncheck_cert intermediate-ca.pem intermediate-ca-key.pem\n\nkubectl create ns cert-manager\nhelm repo add jetstack https://charts.jetstack.io\nhelm repo update\n\n\nhelm install cert-manager jetstack/cert-manager --namespace cert-manager \\\n   --version ${CERT_MANAGER_VERSION} \\\n   --set installCRDs=true \\\n   --set extraArgs[0]=\"--enable-certificate-owner-ref=true\" \\\n   --timeout=600s\n\nhelm repo remove jetstack\n\nkey=$(cat /etc/openstack-helm/certs/ca/ca-key.pem | base64 | tr -d \"\\n\")\ncrt=$(cat /etc/openstack-helm/certs/ca/ca.pem | base64 | tr -d \"\\n\")\n\ncat > /tmp/ca-issuers.yaml <<EOF\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: ca-key-pair\n  namespace: cert-manager\ndata:\n  tls.crt: $crt\n  tls.key: $key\n---\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n  name: ca-issuer\nspec:\n  ca:\n    secretName: ca-key-pair\nEOF\n\n\nkubectl wait --for=condition=Ready pods --all -n cert-manager --timeout=180s\n\n# Per [0], put a sleep here to guard against the error - failed calling webhook \"webhook.cert-manager.io\"\n# [0] https://github.com/jetstack/cert-manager/issues/2602\nsleep 45\n\nkubectl apply -f /tmp/ca-issuers.yaml\n"
  },
  {
    "path": "tools/deployment/common/clean-it.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -eux\nexport OS_CLOUD=openstack_helm\n\nclear\necho\necho \"**************************************************************\"\necho \"Clean-Up script to remove OSH-AIO (Ceph) artifacts created\"\necho \"from execution of the 900-use-it.sh script\"\necho \"**************************************************************\"\necho\n\n# Check if 900-use-it.sh was run.  Verify existence of heat stacks and key-pairs.\necho \"**************************************************************\"\necho \"Checking for heat stacks created from 900-use-it.sh:\"\nif [[ $(openstack stack list) ]]; then\n        echo \"HEAT STACKS PRESENT *** 900-use-it.sh was run\"\nelse\n        echo \"HEAT STACKS 'NOT' PRESENT **\"\n        echo\n        echo \"Stopping the clean-up script as 900-use-it.sh does\"\n        echo \"NOT appear to have been run, or pre-requisite heat\"\n        echo \"stack info is not available.\"\n        echo\n        exit\nfi\necho \"**************************************************************\"\necho\necho \"**************************************************************\"\necho \"Checking for key-pair created from 900-use-it.sh:\"\nif [[ $(openstack keypair list) ]]; then\n        echo \"KEYPAIR PRESENT *** 900-use-it.sh was run\"\nelse\n        echo \"KEYPAIR 'NOT' PRESENT **\"\n        echo\n        echo \"Stopping the clean-up script as 900-use-it.sh 
does\"\n        echo \"NOT appear to have been run, or pre-requisite key-pair\"\n        echo \"info is not available.\"\n        echo\n        exit\nfi\necho \"**************************************************************\"\necho\n\n# DELETE HEAT STACKS.\n# CAPTURE FLOATING_IP FIRST.  USED LATER TO DELETE FROM KNOWN_HOSTS.\nFLOATING_IP=$(sudo openstack --os-cloud openstack_helm stack output show heat-basic-vm-deployment floating_ip -f value -c output_value)\nexport FLOATING_IP\necho \"**************************************************************\"\necho \"Deleting heat stacks:\"\necho\ndeclare -a osStackList=(\"heat-vm-volume-attach\" \"heat-basic-vm-deployment\" \"heat-subnet-pool-deployment\" \"heat-public-net-deployment\")\nfor eachStack in \"${osStackList[@]}\"; do\n    echo \"Deleting OSH-AIO stack= \" $eachStack\n    openstack stack delete -y --wait $eachStack\n    echo\ndone\necho\necho \"Heat stacks deleted.\"\necho \"**************************************************************\"\n\n# DELETE KEY PAIR.\necho\necho \"**************************************************************\"\necho \"Deleting key-pair:\"\necho\ndeclare -a osKeyPairList=(\"heat-vm-key\")\nfor eachKey in \"${osKeyPairList[@]}\"; do\n    echo \"Deleting OSH-AIO keypair= \" $eachKey\n    openstack keypair delete $eachKey\n    echo\ndone\necho\necho \"Key-pair deleted\"\necho \"**************************************************************\"\n\n# DELETE RESIDUAL KEY ARTIFACTS CREATED DURING 900-USE-IT.sh.\necho\necho \"**************************************************************\"\necho \"Deleting files:\"$HOME\"/.ssh/known_hosts and osh_key\"\necho \"Checking for known_hosts file in the $HOME/.ssh/ directory.\"\nif [[ $(ls -A $HOME/.ssh/known_hosts) ]]; then\n    sudo sed -i /$FLOATING_IP/d $HOME/.ssh/known_hosts\n    echo \"FLOATING_IP deleted from $HOME/.ssh/known_hosts.\"\nelse\n    echo \"No known_hosts file found.\"\nfi\necho\necho \"Checking for osh_key file in the $HOME/.ssh/ 
directory\"\nif [[ $(ls -A $HOME/.ssh/osh_key) ]]; then\n    rm $HOME/.ssh/osh_key\n    echo \"$HOME/.ssh/osh_key file removed.\"\nelse\n    echo \"No osh_key file found.\"\nfi\necho \"KEY FILES DELETED SUCCESSFULLY, WHERE AVAILABLE.\"\necho \"**************************************************************\"\n\n# FINAL VERIFICATION.  CONFIRM HEAT STACK AND KEY-PAIR DATA HAS BEEN REMOVED.\necho\necho \"**************************************************************\"\necho \"Final verification to confirm Heat Stack and Key Pair\"\necho \"artifacts have been removed.\"\necho\necho \"Checking for Heat Stack Data:\"\necho \"Please wait.\"\nsleep 10\nif [[ $(openstack stack list) ]]; then\n        echo \"HEAT STACKS STILL PRESENT *** STOPPING SCRIPT.  Verify manually please.\"\n        exit\nelse\n        echo \"HEAT STACK DATA REMOVED SUCCESSFULLY.\"\nfi\n\necho\necho \"Checking for Key-Pair Data:\"\nif [[ $(openstack keypair list) ]]; then\n        echo \"KEYPAIR STILL PRESENT ** STOPPING SCRIPT. Verify manually please.\"\n        exit\nelse\n        echo \"KEYPAIR DATA REMOVED SUCCESSFULLY.\"\nfi\necho \"**************************************************************\"\necho\necho \"Clean-up Completed Successfully!\"\necho\n"
  },
  {
    "path": "tools/deployment/common/deploy-docker-registry.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n\nfor NAMESPACE in docker-nfs docker-registry; do\ntee /tmp/${NAMESPACE}-ns.yaml << EOF\napiVersion: v1\nkind: Namespace\nmetadata:\n  labels:\n    kubernetes.io/metadata.name: ${NAMESPACE}\n    name: ${NAMESPACE}\n  name: ${NAMESPACE}\nEOF\n\nkubectl apply -f /tmp/${NAMESPACE}-ns.yaml\ndone\n\n#NOTE: Deploy nfs for the docker registry\ntee /tmp/docker-registry-nfs-provisioner.yaml << EOF\nlabels:\n  node_selector_key: openstack-helm-node-class\n  node_selector_value: primary\nstorageclass:\n  name: openstack-helm-bootstrap\nEOF\nhelm upgrade --install docker-registry-nfs-provisioner \\\n    ${OSH_HELM_REPO}/nfs-provisioner --namespace=docker-nfs \\\n    --values=/tmp/docker-registry-nfs-provisioner.yaml\n\n#NOTE: Deploy redis for the docker registry\nhelm upgrade --install docker-registry-redis ${OSH_HELM_REPO}/redis \\\n    --namespace=docker-registry \\\n    --set labels.node_selector_key=openstack-helm-node-class \\\n    --set labels.node_selector_value=primary\n\n#NOTE: Deploy the docker registry\ntee /tmp/docker-registry.yaml << EOF\nlabels:\n  node_selector_key: openstack-helm-node-class\n  node_selector_value: primary\nvolume:\n  class_name: openstack-helm-bootstrap\nEOF\nhelm upgrade --install docker-registry ${OSH_HELM_REPO}/registry \\\n    --namespace=docker-registry \\\n    
--values=/tmp/docker-registry.yaml\n\n#NOTE: Wait for deployments\nhelm osh wait-for-pods docker-registry\n\n# Delete the test pod if it still exists\nkubectl delete pods -l application=redis,release_group=docker-registry-redis,component=test --namespace=docker-registry --ignore-not-found\n#NOTE: Run helm tests\nhelm test docker-registry-redis --namespace docker-registry\n"
  },
  {
    "path": "tools/deployment/common/env-variables.sh",
    "content": "#!/bin/bash\n#\n#  Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#  not use this file except in compliance with the License. You may obtain\n#  a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n#  Unless required by applicable law or agreed to in writing, software\n#  distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#  License for the specific language governing permissions and limitations\n#  under the License.\n\nexport API_ADDR=$(kubectl get endpoints kubernetes -o json | jq -r '.subsets[0].addresses[0].ip')\nexport API_PORT=$(kubectl get endpoints kubernetes -o json | jq -r '.subsets[0].ports[0].port')\n"
  },
  {
    "path": "tools/deployment/common/force-cronjob-run.sh",
    "content": "#!/bin/bash\nset -e\n\nTEST_POSTFIX=\"cronjob-test-run\"\n\nCRONJOBS=$(kubectl get cj --no-headers --all-namespaces | awk '{print $1\":\"$2}')\nfor i in ${CRONJOBS}; do\n  NS=$(echo \"$i\" | cut -f1 -d:)\n  CJ=$(echo \"$i\" | cut -f2 -d:)\n\n  # avoid scheduled runs to prevent case when our manual job is deleted by history limit.\n  kubectl patch cj/\"${CJ}\" -n\"${NS}\"  -p '{\"spec\" : {\"suspend\" : true }}'\n  kubectl get job -n\"${NS}\" \"${CJ}-${TEST_POSTFIX}\" || \\\n    kubectl create job -n\"${NS}\" --from=cj/\"${CJ}\" \"${CJ}-${TEST_POSTFIX}\"\ndone\n\necho \"Waiting for all test jobs to complete.\"\nfor i in {1..30}; do\n  RUNNING_JOBS=$(kubectl get jobs --all-namespaces | grep \"${TEST_POSTFIX}.*0/1\" ||:)\n  [ -z \"${RUNNING_JOBS}\" ] && break\n  sleep 10\ndone\n\nif [ -n \"${RUNNING_JOBS}\" ]; then\n  echo -e \"Timed out waiting for cj jobs to complete:\\n${RUNNING_JOBS}\"\n  exit 1\nfi\n\necho \"All test jobs completed.\"\n\nNOT_FOUND_JOBS=\"\"\necho \"Checking that every cronjob has a corresponding test job\"\nfor i in ${CRONJOBS}; do\n  NS=$(echo \"$i\" | cut -f1 -d:)\n  CJ=$(echo \"$i\" | cut -f2 -d:)\n  if [ -z \"$(kubectl get job -n ${NS} ${CJ}-${TEST_POSTFIX} | grep '1/1' ||:)\" ]; then\n      NOT_FOUND_JOBS=\"${NOT_FOUND_JOBS}\\n$i\"\n  fi\ndone\n\nif [ -n \"${NOT_FOUND_JOBS}\" ]; then\n    echo -e \"Some cronjobs don't have corresponding test jobs:${NOT_FOUND_JOBS}\"\n    kubectl get jobs --all-namespaces | grep ${TEST_POSTFIX}\n    exit 1\nfi\n\necho \"Cronjobs run successfully:\"\nkubectl get jobs --all-namespaces | grep ${TEST_POSTFIX}\n\n# Unsuspend cj on success assuming they all were not suspended on start\nfor i in ${CRONJOBS}; do\n  NS=$(echo \"$i\" | cut -f1 -d:)\n  CJ=$(echo \"$i\" | cut -f2 -d:)\n  kubectl patch cj/\"${CJ}\" -n\"${NS}\"  -p '{\"spec\" : {\"suspend\" : false }}'\ndone\n"
  },
  {
    "path": "tools/deployment/common/heat-basic-vm-deployment.yaml",
    "content": "---\nheat_template_version: '2021-04-16'\n\nparameters:\n  public_net:\n    type: string\n    default: public\n\n  image:\n    type: string\n    default: Cirros 0.6.2 64-bit\n\n  ssh_key:\n    type: string\n    default: heat-vm-key\n\n  cidr:\n    type: string\n    default: 192.168.128.0/24\n\n  dns_nameserver:\n    type: string\n    description: address of a dns nameserver reachable in your environment\n    default: 8.8.8.8\n\n  dpdk:\n    type: string\n    default: disabled\n    constraints:\n      - allowed_values:\n          - enabled\n          - disabled\n\n  is_ubuntu:\n    type: string\n    default: \"false\"\n    constraints:\n      - allowed_values:\n          - \"true\"\n          - \"false\"\n\nconditions:\n  dpdk_enable: {equals: [{get_param: dpdk}, \"enabled\"]}\n  is_ubuntu: {equals: [{get_param: is_ubuntu}, \"true\"]}\n\nresources:\n  flavor:\n    type: OS::Nova::Flavor\n    properties:\n      disk: 3\n      ram: 1024\n      vcpus: 2\n\n  flavor_dpdk:\n    type: OS::Nova::Flavor\n    properties:\n      disk: 3\n      ram: 2048\n      vcpus: 2\n      extra_specs:\n        \"hw:mem_page_size\": \"2MB\"\n\n  ubuntu_cloud_config:\n    type: OS::Heat::CloudConfig\n    properties:\n      cloud_config:\n        package_update: true\n        packages:\n          - iputils-ping\n        write_files:\n          - path: /etc/resolv.conf\n            content:\n              str_replace:\n                template: |\n                  nameserver $nameserver\n                params:\n                  $nameserver: {get_param: dns_nameserver}\n            owner: root:root\n            permissions: '0644'\n        runcmd:\n          - systemctl stop systemd-resolved\n          - systemctl disable systemd-resolved\n\n  server:\n    type: OS::Nova::Server\n    properties:\n      image:\n        get_param: image\n      flavor: {if: [\"dpdk_enable\", {get_resource: flavor_dpdk}, {get_resource: flavor}]}\n      key_name:\n        get_param: ssh_key\n     
 networks:\n        - port:\n            get_resource: server_port\n      user_data_format: RAW\n      user_data: {if: [is_ubuntu, {get_resource: ubuntu_cloud_config}, \"\"]}\n\n  router:\n    type: OS::Neutron::Router\n    properties:\n      external_gateway_info:\n        network:\n          get_param: public_net\n\n  router_interface:\n    type: OS::Neutron::RouterInterface\n    properties:\n      router_id:\n        get_resource: router\n      subnet_id:\n        get_resource: private_subnet\n\n  private_net:\n    type: OS::Neutron::Net\n\n  private_subnet:\n    type: OS::Neutron::Subnet\n    properties:\n      network:\n        get_resource: private_net\n      cidr:\n        get_param: cidr\n      dns_nameservers:\n        - {get_param: dns_nameserver}\n\n  port_security_group:\n    type: OS::Neutron::SecurityGroup\n    properties:\n      name: default_port_security_group\n      description: 'Default security group assigned to port.'\n      rules:\n        - remote_ip_prefix: 0.0.0.0/0\n          protocol: tcp\n          port_range_min: 22\n          port_range_max: 22\n        - remote_ip_prefix: 0.0.0.0/0\n          protocol: icmp\n\n  server_port:\n    type: OS::Neutron::Port\n    properties:\n      network:\n        get_resource: private_net\n      fixed_ips:\n        - subnet:\n            get_resource: private_subnet\n      security_groups:\n        - get_resource: port_security_group\n\n  server_floating_ip:\n    type: OS::Neutron::FloatingIP\n    properties:\n      floating_network:\n        get_param: public_net\n      port_id:\n        get_resource: server_port\n\noutputs:\n  floating_ip:\n    value:\n      get_attr:\n        - server_floating_ip\n        - floating_ip_address\n  instance_uuid:\n    value:\n      get_attr:\n        - server\n        - show\n        - id\n...\n"
  },
  {
    "path": "tools/deployment/common/heat-public-net-deployment.yaml",
    "content": "---\nheat_template_version: 2016-10-14\n\nparameters:\n  network_name:\n    type: string\n    default: public\n\n  physical_network_name:\n    type: string\n    default: public\n\n  subnet_name:\n    type: string\n    default: public\n\n  subnet_cidr:\n    type: string\n    default: 172.24.4.0/24\n\n  subnet_gateway:\n    type: string\n    default: 172.24.4.1\n\n  allocation_pool_start:\n    type: string\n    default: 172.24.4.10\n\n  allocation_pool_end:\n    type: string\n    default: 172.24.4.254\n\nresources:\n  public_net:\n    type: OS::Neutron::ProviderNet\n    properties:\n      name:\n        get_param: network_name\n      router_external: true\n      physical_network:\n        get_param: physical_network_name\n      network_type: flat\n\n  private_subnet:\n    type: OS::Neutron::Subnet\n    properties:\n      name:\n        get_param: subnet_name\n      network:\n        get_resource: public_net\n      cidr:\n        get_param: subnet_cidr\n      gateway_ip:\n        get_param: subnet_gateway\n      enable_dhcp: false\n      dns_nameservers:\n        - get_param: subnet_gateway\n      allocation_pools:\n        - start: {get_param: allocation_pool_start}\n          end: {get_param: allocation_pool_end}\n...\n"
  },
  {
    "path": "tools/deployment/common/heat-subnet-pool-deployment.yaml",
    "content": "---\nheat_template_version: 2016-10-14\n\nparameters:\n  subnet_pool_name:\n    type: string\n    default: shared-default-subnetpool\n\n  subnet_pool_prefixes:\n    type: comma_delimited_list\n    default:\n      - 192.168.128.0/20\n\n  subnet_pool_default_prefix_length:\n    type: number\n    default: 24\n\nresources:\n  public_net:\n    type: OS::Neutron::SubnetPool\n    properties:\n      name:\n        get_param: subnet_pool_name\n      shared: true\n      is_default: true\n      default_prefixlen:\n        get_param: subnet_pool_default_prefix_length\n      prefixes:\n        get_param: subnet_pool_prefixes\n...\n"
  },
  {
    "path": "tools/deployment/common/heat-vm-volume-attach.yaml",
    "content": "---\nheat_template_version: 2016-10-14\n\nparameters:\n  instance_uuid:\n    type: string\n\nresources:\n  cinder_volume:\n    type: OS::Cinder::Volume\n    properties:\n      name: vol1\n      size: 1\n\n  cinder_volume_attach:\n    type: OS::Cinder::VolumeAttachment\n    properties:\n      instance_uuid:\n        get_param: instance_uuid\n      volume_id:\n        get_resource: cinder_volume\n...\n"
  },
  {
    "path": "tools/deployment/common/namespace-config.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n\n#NOTE: Deploy namespace configs\nfor NAMESPACE in kube-system ceph openstack; do\n  helm upgrade --install ${NAMESPACE}-namespace-config ${OSH_HELM_REPO}/namespace-config \\\n    --namespace=${NAMESPACE}\ndone\n"
  },
  {
    "path": "tools/deployment/common/prepare-bashrc.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\ncat >> ${HOME}/.bashrc <<EOF\nexport RUN_HELM_TESTS=no\nexport OS_CLOUD=\"openstack_helm\"\nexport OPENSTACK_RELEASE=\"${OPENSTACK_RELEASE}\"\nexport CONTAINER_DISTRO_NAME=\"${CONTAINER_DISTRO_NAME}\"\nexport CONTAINER_DISTRO_VERSION=\"${CONTAINER_DISTRO_VERSION}\"\nexport FEATURES=\"${FEATURES}\"\nEOF\n"
  },
  {
    "path": "tools/deployment/common/prepare-charts.sh",
    "content": "#!/bin/bash\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\n\n# Build all OSH charts\nmake all SKIP_CHANGELOG=1\n"
  },
  {
    "path": "tools/deployment/common/prepare-helm-repos-local.sh",
    "content": "#!/bin/bash\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\n\nhelm repo index ./\n\ndocker run -d --name serve_charts \\\n    -v $(pwd):/usr/share/nginx/html/openstack-helm:ro \\\n    -p 80:80 \\\n    nginx\n\nhelm repo add ${OSH_HELM_REPO:-\"openstack-helm\"} http://localhost/openstack-helm\n"
  },
  {
    "path": "tools/deployment/common/prepare-helm-repos-public.sh",
    "content": "#!/bin/bash\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\n\nhelm repo add ${OSH_HELM_REPO:-\"openstack-helm\"} https://tarballs.opendev.org/openstack/openstack-helm\n"
  },
  {
    "path": "tools/deployment/common/prepare-k8s.sh",
    "content": "#!/bin/bash\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -ex\n\n# Add labels to the core namespaces & nodes\nkubectl label --overwrite namespace default name=default\nkubectl label --overwrite namespace kube-system name=kube-system\nkubectl label --overwrite namespace kube-public name=kube-public\nkubectl label --overwrite nodes --all openstack-control-plane=enabled\nkubectl label --overwrite nodes --all openstack-compute-node=enabled\nkubectl label --overwrite nodes --all openvswitch=enabled\nkubectl label --overwrite nodes --all linuxbridge=enabled\nkubectl label --overwrite nodes --all ceph-mon=enabled\nkubectl label --overwrite nodes --all ceph-osd=enabled\nkubectl label --overwrite nodes --all ceph-mds=enabled\nkubectl label --overwrite nodes --all ceph-rgw=enabled\nkubectl label --overwrite nodes --all ceph-mgr=enabled\n# We deploy l3 agent only on the node where we run test scripts.\n# In this case virtual router will be created only on this node\n# and we don't need L2 overlay (will be implemented later).\nkubectl label --overwrite nodes -l \"node-role.kubernetes.io/control-plane\" l3-agent=enabled\n\nkubectl label --overwrite nodes -l \"node-role.kubernetes.io/control-plane\" openstack-network-node=enabled\n\nfor NAMESPACE in ceph openstack osh-infra; do\ntee /tmp/${NAMESPACE}-ns.yaml << EOF\napiVersion: v1\nkind: Namespace\nmetadata:\n  labels:\n    kubernetes.io/metadata.name: ${NAMESPACE}\n    name: ${NAMESPACE}\n  name: 
${NAMESPACE}\nEOF\n\nkubectl apply -f /tmp/${NAMESPACE}-ns.yaml\ndone\n"
  },
  {
    "path": "tools/deployment/common/pull-images.sh",
    "content": "#!/bin/bash\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nif [ \"x$1\" == \"x\" ]; then\n  CHART_DIRS=\"$(echo ./*/)\"\nelse\n  CHART_DIRS=\"$(echo ./$1/)\"\nfi\n\nfor CHART_DIR in ${CHART_DIRS} ; do\n  if [ -e ${CHART_DIR}values.yaml ]; then\n    for IMAGE in $(cat ${CHART_DIR}values.yaml | yq '.images.tags | map(.) | join(\" \")' | tr -d '\"'); do\n      sudo docker inspect $IMAGE >/dev/null|| sudo docker pull $IMAGE\n    done\n  fi\ndone\n"
  },
  {
    "path": "tools/deployment/common/rally-reports.yaml",
    "content": "---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  labels:\n    version: v0.1.0\n  name: get-rally-data\nspec:\n  template:\n    spec:\n      restartPolicy: OnFailure\n      containers:\n        - name: get-rally-data\n          image: docker.io/alpine:latest\n          imagePullPolicy: Always\n          command:\n            - /bin/sh\n            - -ec\n            - |\n              cp -av /mnt/rally-pvc/* /mnt/rally-data\n          volumeMounts:\n            - name: pvc-rally\n              mountPath: /mnt/rally-pvc\n            - name: rally-data\n              mountPath: /mnt/rally-data\n      volumes:\n        - name: pvc-rally\n          persistentVolumeClaim:\n            claimName: pvc-rally\n        - name: rally-data\n          hostPath:\n            path: /tmp/rally-data\n...\n"
  },
  {
    "path": "tools/deployment/common/run-helm-tests.sh",
    "content": "#!/bin/bash\nset -x\n\nAPPLICATION=$1\nRELEASE_GROUP=${2:-${APPLICATION}}\nNAMESPACE=${3:-openstack}\n: ${HELM_TESTS_TRIES:=2}\ntimeout=${OSH_TEST_TIMEOUT:-900}\n\nrun_tests() {\n  # Delete the test pod if it still exists\n  kubectl delete pods -l application=${APPLICATION},release_group=${RELEASE_GROUP},component=test --namespace=${NAMESPACE} --ignore-not-found\n  helm test ${APPLICATION} --timeout ${timeout}s --namespace=${NAMESPACE}\n}\n\nfor i in $(seq 1 ${HELM_TESTS_TRIES}); do\n  echo \"Run helm tests for ${APPLICATION}. Try #${i}\"\n  run_tests\n  RC=$?\n  [ ${RC} -eq \"0\" ] && break\ndone\nexit ${RC}\n"
  },
  {
    "path": "tools/deployment/common/setup-certificates.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\nCURRENT_DIR=$(pwd)\nCFSSLURL=https://pkg.cfssl.org/R1.2\n\nTDIR=/tmp/certs\nrm -rf $TDIR\nmkdir -p $TDIR/bin\n\ncd $TDIR\ncurl -sSL -o bin/cfssl $CFSSLURL/cfssl_linux-amd64\ncurl -sSL -o bin/cfssljson $CFSSLURL/cfssljson_linux-amd64\nchmod +x bin/{cfssl,cfssljson}\nexport PATH=$PATH:./bin\n\nOSH_CONFIG_ROOT=\"/etc/openstack-helm\"\nOSH_CA_ROOT=\"${OSH_CONFIG_ROOT}/certs/ca\"\nOSH_SERVER_TLS_ROOT=\"${OSH_CONFIG_ROOT}/certs/server\"\n\nsudo mkdir -p ${OSH_CONFIG_ROOT}\nsudo chown $(whoami): -R ${OSH_CONFIG_ROOT}\n\nmkdir -p \"${OSH_CA_ROOT}\"\ntee ${OSH_CA_ROOT}/ca-config.json << EOF\n{\n    \"signing\": {\n        \"default\": {\n            \"expiry\": \"24h\"\n        },\n        \"profiles\": {\n            \"server\": {\n                \"expiry\": \"24h\",\n                \"usages\": [\n                    \"signing\",\n                    \"key encipherment\",\n                    \"server auth\"\n                ]\n            }\n        }\n    }\n}\nEOF\n\ntee ${OSH_CA_ROOT}/ca-csr.json << EOF\n{\n  \"CN\": \"ACME Company\",\n  \"key\": {\n    \"algo\": \"rsa\",\n    \"size\": 2048\n  },\n  \"names\": [\n    {\n      \"C\": \"US\",\n      \"L\": \"SomeState\",\n      \"ST\": \"SomeCity\",\n      \"O\": \"SomeOrg\",\n      \"OU\": \"SomeUnit\"\n    }\n  ]\n}\nEOF\n\ncfssl gencert -initca ${OSH_CA_ROOT}/ca-csr.json | cfssljson -bare 
${OSH_CA_ROOT}/ca -\n\nfunction check_cert_and_key () {\n  TLS_CERT=$1\n  TLS_KEY=$2\n  openssl x509 -inform pem -in ${TLS_CERT} -noout -text\n  CERT_MOD=\"$(openssl x509 -noout -modulus -in ${TLS_CERT})\"\n  KEY_MOD=\"$(openssl rsa -noout -modulus -in ${TLS_KEY})\"\n  if ! [ \"${CERT_MOD}\" = \"${KEY_MOD}\" ]; then\n    echo \"Failure: TLS private key does not match this certificate.\"\n    exit 1\n  else\n    CERT_MOD=\"\"\n    KEY_MOD=\"\"\n    echo \"Pass: ${TLS_CERT} is valid with ${TLS_KEY}\"\n  fi\n}\ncheck_cert_and_key ${OSH_CA_ROOT}/ca.pem ${OSH_CA_ROOT}/ca-key.pem\n\nDOMAIN=openstackhelm.test\nfor HOSTNAME in \"swift\" \"keystone\" \"heat\" \"cloudformation\" \"horizon\" \"glance\" \"cinder\" \"nova\" \"placement\" \"novnc\" \"metadata\" \"neutron\" \"barbican\"; do\n  FQDN=\"${HOSTNAME}.${DOMAIN}\"\n\n  OSH_SERVER_CERTS=\"${OSH_SERVER_TLS_ROOT}/${HOSTNAME}\"\n  mkdir -p \"${OSH_SERVER_CERTS}\"\n\n  tee ${OSH_SERVER_CERTS}/server-csr-${HOSTNAME}.json <<EOF\n{\n  \"CN\": \"${FQDN}\",\n  \"hosts\": [\n    \"${FQDN}\"\n  ],\n  \"key\": {\n    \"algo\": \"rsa\",\n    \"size\": 2048\n  },\n  \"names\": [\n    {\n      \"C\": \"US\",\n      \"L\": \"SomeState\",\n      \"ST\": \"SomeCity\",\n      \"O\": \"SomeOrg\",\n      \"OU\": \"SomeUnit\"\n    }\n  ]\n}\nEOF\n  cfssl gencert \\\n    -hostname=\"${FQDN}\" \\\n    -ca=${OSH_CA_ROOT}/ca.pem \\\n    -ca-key=${OSH_CA_ROOT}/ca-key.pem \\\n    -config=${OSH_CA_ROOT}/ca-config.json \\\n    -profile=server \\\n    ${OSH_SERVER_CERTS}/server-csr-${HOSTNAME}.json | cfssljson -bare ${OSH_SERVER_CERTS}/server\n\n  check_cert_and_key ${OSH_SERVER_CERTS}/server.pem ${OSH_SERVER_CERTS}/server-key.pem\ndone\n\ncd 
$CURRENT_DIR\n\nKEYSTONE_CRT=${OSH_SERVER_TLS_ROOT}/keystone/server.pem\nKEYSTONE_KEY=${OSH_SERVER_TLS_ROOT}/keystone/server-key.pem\nKEYSTONE_CSR=${OSH_SERVER_TLS_ROOT}/keystone/server-csr-keystone.json\n\nSWIFT_CRT=${OSH_SERVER_TLS_ROOT}/swift/server.pem\nSWIFT_KEY=${OSH_SERVER_TLS_ROOT}/swift/server-key.pem\nSWIFT_CSR=${OSH_SERVER_TLS_ROOT}/swift/server-csr-swift.json\n\nBARBICAN_CRT=${OSH_SERVER_TLS_ROOT}/barbican/server.pem\nBARBICAN_KEY=${OSH_SERVER_TLS_ROOT}/barbican/server-key.pem\nBARBICAN_CSR=${OSH_SERVER_TLS_ROOT}/barbican/server-csr-barbican.json\n\nHEAT_API_CRT=${OSH_SERVER_TLS_ROOT}/heat/server.pem\nHEAT_API_KEY=${OSH_SERVER_TLS_ROOT}/heat/server-key.pem\nHEAT_API_CSR=${OSH_SERVER_TLS_ROOT}/heat/server-csr-heat.json\nHEAT_CFN_CRT=${OSH_SERVER_TLS_ROOT}/cloudformation/server.pem\nHEAT_CFN_KEY=${OSH_SERVER_TLS_ROOT}/cloudformation/server-key.pem\nHEAT_CFN_CSR=${OSH_SERVER_TLS_ROOT}/cloudformation/server-csr-cloudformation.json\n\nHORIZON_CRT=${OSH_SERVER_TLS_ROOT}/horizon/server.pem\nHORIZON_KEY=${OSH_SERVER_TLS_ROOT}/horizon/server-key.pem\nHORIZON_CSR=${OSH_SERVER_TLS_ROOT}/horizon/server-csr-horizon.json\n\nGLANCE_API_CRT=${OSH_SERVER_TLS_ROOT}/glance/server.pem\nGLANCE_API_KEY=${OSH_SERVER_TLS_ROOT}/glance/server-key.pem\nGLANCE_API_CSR=${OSH_SERVER_TLS_ROOT}/glance/server-csr-glance.json\n\nCINDER_CRT=${OSH_SERVER_TLS_ROOT}/cinder/server.pem\nCINDER_KEY=${OSH_SERVER_TLS_ROOT}/cinder/server-key.pem\nCINDER_CSR=${OSH_SERVER_TLS_ROOT}/cinder/server-csr-cinder.json\n\nNOVA_API_CRT=${OSH_SERVER_TLS_ROOT}/nova/server.pem\nNOVA_API_KEY=${OSH_SERVER_TLS_ROOT}/nova/server-key.pem\nNOVA_API_CSR=${OSH_SERVER_TLS_ROOT}/nova/server-csr-nova.json\n\nNOVA_NOVNC_CRT=${OSH_SERVER_TLS_ROOT}/novnc/server.pem\nNOVA_NOVNC_KEY=${OSH_SERVER_TLS_ROOT}/novnc/server-key.pem\nNOVA_NOVNC_CSR=${OSH_SERVER_TLS_ROOT}/novnc/server-csr-novnc.json\n\nPLACEMENT_CRT=${OSH_SERVER_TLS_ROOT}/placement/server.pem\nPLACEMENT_KEY=${OSH_SERVER_TLS_ROOT}/placement/server-key.pem\nPLACEMENT_CS
R=${OSH_SERVER_TLS_ROOT}/placement/server-csr-placement.json\n\nNEUTRON_SERVER_CRT=${OSH_SERVER_TLS_ROOT}/neutron/server.pem\nNEUTRON_SERVER_KEY=${OSH_SERVER_TLS_ROOT}/neutron/server-key.pem\nNEUTRON_SERVER_CSR=${OSH_SERVER_TLS_ROOT}/neutron/server-csr-neutron.json\n\nBARBICAN_API_CRT=${OSH_SERVER_TLS_ROOT}/barbican/server.pem\nBARBICAN_API_KEY=${OSH_SERVER_TLS_ROOT}/barbican/server-key.pem\nBARBICAN_API_CSR=${OSH_SERVER_TLS_ROOT}/barbican/server-csr-barbican.json\n\ntee /tmp/tls-endpoints.yaml << EOF\nendpoints:\n  object_store:\n    scheme:\n      public: https\n    port:\n      api:\n        public: 443\n    host_fqdn_override:\n      public:\n        host: \"$(cat \"${SWIFT_CSR}\" | jq -r '.CN')\"\n        tls:\n          crt: |\n$(cat ${SWIFT_CRT} | sed 's/^/            /')\n          key: |\n$(cat ${SWIFT_KEY} | sed 's/^/            /')\n          ca: |\n$(cat ${OSH_CA_ROOT}/ca.pem | sed 's/^/            /')\n  identity:\n    scheme:\n      public: https\n    port:\n      api:\n        public: 443\n    host_fqdn_override:\n      public:\n        host: \"$(cat \"${KEYSTONE_CSR}\" | jq -r '.CN')\"\n        tls:\n          crt: |\n$(cat ${KEYSTONE_CRT} | sed 's/^/            /')\n          key: |\n$(cat ${KEYSTONE_KEY} | sed 's/^/            /')\n          ca: |\n$(cat ${OSH_CA_ROOT}/ca.pem | sed 's/^/            /')\n  orchestration:\n    scheme:\n      public: https\n    port:\n      api:\n        public: 443\n    host_fqdn_override:\n      public:\n        host: \"$(cat \"${HEAT_API_CSR}\" | jq -r '.CN')\"\n        tls:\n          crt: |\n$(cat ${HEAT_API_CRT} | sed 's/^/            /')\n          key: |\n$(cat ${HEAT_API_KEY} | sed 's/^/            /')\n          ca: |\n$(cat ${OSH_CA_ROOT}/ca.pem | sed 's/^/            /')\n  cloudformation:\n    scheme:\n      public: https\n    port:\n      cfn:\n        public: 443\n    host_fqdn_override:\n      public:\n        host: \"$(cat \"${HEAT_CFN_CSR}\" | jq -r '.CN')\"\n        tls:\n          crt: |\n$(cat 
${HEAT_CFN_CRT} | sed 's/^/            /')\n          key: |\n$(cat ${HEAT_CFN_KEY} | sed 's/^/            /')\n          ca: |\n$(cat ${OSH_CA_ROOT}/ca.pem | sed 's/^/            /')\n  dashboard:\n    scheme:\n      public: https\n    port:\n      web:\n        public: 443\n    host_fqdn_override:\n      public:\n        host: \"$(cat \"${HORIZON_CSR}\" | jq -r '.CN')\"\n        tls:\n          crt: |\n$(cat ${HORIZON_CRT} | sed 's/^/            /')\n          key: |\n$(cat ${HORIZON_KEY} | sed 's/^/            /')\n          ca: |\n$(cat ${OSH_CA_ROOT}/ca.pem | sed 's/^/            /')\n  image:\n    scheme:\n      public: https\n    port:\n      api:\n        public: 443\n    host_fqdn_override:\n      public:\n        host: \"$(cat \"${GLANCE_API_CSR}\" | jq -r '.CN')\"\n        tls:\n          crt: |\n$(cat ${GLANCE_API_CRT} | sed 's/^/            /')\n          key: |\n$(cat ${GLANCE_API_KEY} | sed 's/^/            /')\n          ca: |\n$(cat ${OSH_CA_ROOT}/ca.pem | sed 's/^/            /')\n  volume:\n    scheme:\n      public: https\n    port:\n      api:\n        public: 443\n    host_fqdn_override:\n      public:\n        host: \"$(cat \"${CINDER_CSR}\" | jq -r '.CN')\"\n        tls:\n          crt: |\n$(cat ${CINDER_CRT} | sed 's/^/            /')\n          key: |\n$(cat ${CINDER_KEY} | sed 's/^/            /')\n          ca: |\n$(cat ${OSH_CA_ROOT}/ca.pem | sed 's/^/            /')\n  volumev2:\n    scheme:\n      public: https\n    port:\n      api:\n        public: 443\n    host_fqdn_override:\n      public:\n        host: \"$(cat \"${CINDER_CSR}\" | jq -r '.CN')\"\n        tls:\n          crt: |\n$(cat ${CINDER_CRT} | sed 's/^/            /')\n          key: |\n$(cat ${CINDER_KEY} | sed 's/^/            /')\n          ca: |\n$(cat ${OSH_CA_ROOT}/ca.pem | sed 's/^/            /')\n  volumev3:\n    scheme:\n      public: https\n    port:\n      api:\n        public: 443\n    host_fqdn_override:\n      public:\n        host: \"$(cat \"${CINDER_CSR}\" 
 | jq -r '.CN')\"\n        tls:\n          crt: |\n$(cat ${CINDER_CRT} | sed 's/^/            /')\n          key: |\n$(cat ${CINDER_KEY} | sed 's/^/            /')\n          ca: |\n$(cat ${OSH_CA_ROOT}/ca.pem | sed 's/^/            /')\n  compute:\n    scheme:\n      public: https\n    port:\n      api:\n        public: 443\n    host_fqdn_override:\n      public:\n        host: \"$(cat \"${NOVA_API_CSR}\" | jq -r '.CN')\"\n        tls:\n          crt: |\n$(cat ${NOVA_API_CRT} | sed 's/^/            /')\n          key: |\n$(cat ${NOVA_API_KEY} | sed 's/^/            /')\n          ca: |\n$(cat ${OSH_CA_ROOT}/ca.pem | sed 's/^/            /')\n  compute_novnc_proxy:\n    scheme:\n      public: https\n    port:\n      novnc_proxy:\n        public: 443\n    host_fqdn_override:\n      public:\n        host: \"$(cat \"${NOVA_NOVNC_CSR}\" | jq -r '.CN')\"\n        tls:\n          crt: |\n$(cat ${NOVA_NOVNC_CRT} | sed 's/^/            /')\n          key: |\n$(cat ${NOVA_NOVNC_KEY} | sed 's/^/            /')\n          ca: |\n$(cat ${OSH_CA_ROOT}/ca.pem | sed 's/^/            /')\n  placement:\n    scheme:\n      public: https\n    port:\n      api:\n        public: 443\n    host_fqdn_override:\n      public:\n        host: \"$(cat \"${PLACEMENT_CSR}\" | jq -r '.CN')\"\n        tls:\n          crt: |\n$(cat ${PLACEMENT_CRT} | sed 's/^/            /')\n          key: |\n$(cat ${PLACEMENT_KEY} | sed 's/^/            /')\n          ca: |\n$(cat ${OSH_CA_ROOT}/ca.pem | sed 's/^/            /')\n  network:\n    scheme:\n      public: https\n    port:\n      api:\n        public: 443\n    host_fqdn_override:\n      public:\n        host: \"$(cat \"${NEUTRON_SERVER_CSR}\" | jq -r '.CN')\"\n        tls:\n          crt: |\n$(cat ${NEUTRON_SERVER_CRT} | sed 's/^/            /')\n          key: |\n$(cat ${NEUTRON_SERVER_KEY} | sed 's/^/            /')\n          ca: |\n$(cat ${OSH_CA_ROOT}/ca.pem | sed 's/^/            /')\n  key_manager:\n    scheme:\n      public: https\n    
port:\n      api:\n        public: 443\n    host_fqdn_override:\n      public:\n        host: \"$(cat \"${BARBICAN_API_CSR}\" | jq -r '.CN')\"\n        tls:\n          crt: |\n$(cat ${BARBICAN_API_CRT} | sed 's/^/            /')\n          key: |\n$(cat ${BARBICAN_API_KEY} | sed 's/^/            /')\n          ca: |\n$(cat ${OSH_CA_ROOT}/ca.pem | sed 's/^/            /')\nEOF\n\nexport OSH_EXTRA_HELM_ARGS=\"--values=/tmp/tls-endpoints.yaml\"\n"
  },
  {
    "path": "tools/deployment/common/setup-client.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\nsudo -H mkdir -p /etc/openstack\nsudo -H chown -R $(id -un): /etc/openstack\nif [[ ${FEATURES//,/ } =~ (^|[[:space:]])tls($|[[:space:]]) ]]; then\n  tee /etc/openstack/clouds.yaml << EOF\n  clouds:\n    openstack_helm:\n      region_name: RegionOne\n      identity_api_version: 3\n      cacert: /etc/openstack-helm/certs/ca/ca.pem\n      auth:\n        username: 'admin'\n        password: 'password'\n        project_name: 'admin'\n        project_domain_name: 'default'\n        user_domain_name: 'default'\n        auth_url: 'https://keystone.openstack-helm.org/v3'\nEOF\nelse\n  tee /etc/openstack/clouds.yaml << EOF\n  clouds:\n    openstack_helm:\n      region_name: RegionOne\n      identity_api_version: 3\n      auth:\n        username: 'admin'\n        password: 'password'\n        project_name: 'admin'\n        project_domain_name: 'default'\n        user_domain_name: 'default'\n        auth_url: 'http://keystone.openstack-helm.org/v3'\nEOF\nfi\n\nsudo tee /usr/local/bin/openstack << EOF\n#!/bin/bash\nset -x\n\nargs=(\"\\$@\")\nsudo docker run \\\\\n    --rm \\\\\n    --network host \\\\\n    -w / \\\\\n    -v /etc/openstack/clouds.yaml:/etc/openstack/clouds.yaml \\\\\n    -v /etc/openstack-helm:/etc/openstack-helm \\\\\n    -e OS_CLOUD=\\${OS_CLOUD} \\\\\n    \\${OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS} \\\\\n    
quay.io/airshipit/openstack-client:\\${OPENSTACK_RELEASE:-2025.1}-\\${CONTAINER_DISTRO_NAME:-ubuntu}_\\${CONTAINER_DISTRO_VERSION:-noble} openstack \"\\${args[@]}\"\nEOF\nsudo chmod +x /usr/local/bin/openstack\n"
  },
  {
    "path": "tools/deployment/common/sleep.sh",
    "content": "#!/bin/bash\n\nset -ex\n\nwhile true; do\n    echo \"Sleeping for 100 seconds...\"\ndone\n"
  },
  {
    "path": "tools/deployment/common/test-networkpolicy.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n# test_netpol(namespace, application, component, target_host, expected_result{fail,success})\nfunction test_netpol {\n  NS=$1\n  APP=$2\n  COMPONENT=$3\n  HOST=$4\n  STATUS=$5\n  echo Testing connection from $APP - $COMPONENT to host $HOST with namespace $NS\n  POD=$(kubectl -n $NS get pod -l application=$APP,component=$COMPONENT | grep Running | cut -f 1 -d \" \" | head -n 1)\n  PID=$(sudo docker inspect --format '{{ .State.Pid }}' $(kubectl get pods --namespace $NS $POD -o jsonpath='{.status.containerStatuses[0].containerID}' | cut -c 10-21))\n  if [ \"x${STATUS}\" == \"xfail\" ]; then\n    if ! 
sudo nsenter -t $PID -n wget --spider --timeout=5 --tries=1 $HOST ; then\n      echo \"Connection timed out; as expected by policy.\"\n    else\n      exit 1\n    fi\n  else\n    sudo nsenter -t $PID -n wget --spider --timeout=5 --tries=1 $HOST\n  fi\n}\n\n#NOTE(gagehugo): Enable the negative tests once the services policy is defined\n\n# General Netpol Tests\n# Doing negative tests\n#test_netpol openstack mariadb server rabbitmq.openstack.svc.cluster.local:5672 fail\n#test_netpol openstack rabbitmq-rabbitmq server memcached.openstack.svc.cluster.local:11211 fail\n\n# Negative Keystone tests\ntest_netpol openstack mariadb server keystone-api.openstack.svc.cluster.local:5000 fail\ntest_netpol openstack mariadb ingress keystone-api.openstack.svc.cluster.local:5000 fail\ntest_netpol openstack memcached server keystone-api.openstack.svc.cluster.local:5000 fail\ntest_netpol openstack rabbitmq server keystone-api.openstack.svc.cluster.local:5000 fail\n\n# Negative Mariadb tests\ntest_netpol openstack memcached server mariadb.openstack.svc.cluster.local:3306 fail\ntest_netpol openstack ingress server mariadb-server.openstack.svc.cluster.local:3306 fail\n\n# Doing positive tests\n\n# Positive Mariadb tests\ntest_netpol openstack keystone api mariadb.openstack.svc.cluster.local:3306 success\ntest_netpol openstack keystone api mariadb-server.openstack.svc.cluster.local:3306 success\ntest_netpol openstack mariadb ingress mariadb-server.openstack.svc.cluster.local:3306 success\n\ntest_netpol openstack keystone api rabbitmq.openstack.svc.cluster.local:5672 success\ntest_netpol openstack ingress server keystone-api.openstack.svc.cluster.local:5000 success\ntest_netpol openstack prometheus-openstack-exporter exporter keystone-api.openstack.svc.cluster.local:5000 success\n\nif kubectl -n openstack get pod -l application=horizon | grep Running ; then\n  test_netpol openstack keystone api horizon.openstack.svc.cluster.local:80 fail\nfi\n\nif kubectl -n openstack get pod -l 
application=cinder | grep Running ; then\n# Negative Cinder Tests\n  #test_netpol openstack keystone api cinder-api.openstack.svc.cluster.local fail\n  test_netpol openstack cinder api horizon.openstack.svc.cluster.local:80 fail\n# Positive Cinder Tests\n  test_netpol openstack cinder api rabbitmq.openstack.svc.cluster.local:5672 success\n\n  # Positive Keystone test\n  test_netpol openstack cinder api keystone-api.openstack.svc.cluster.local:5000 success\n\n  # Positive Mariadb tests\n  test_netpol openstack cinder api mariadb.openstack.svc.cluster.local:3306 success\n  test_netpol openstack cinder api mariadb-server.openstack.svc.cluster.local:3306 success\nelse\n# Negative Compute-Kit Tests\n  #test_netpol openstack keystone api heat-api.openstack.svc.cluster.local fail\n  #test_netpol openstack keystone api glance-api.openstack.svc.cluster.local fail\n  test_netpol openstack mariadb server glance-api.openstack.svc.cluster.local:9292 fail\n  test_netpol openstack memcached server glance-api.openstack.svc.cluster.local:9292 fail\n  test_netpol openstack keystone api glance-api.openstack.svc.cluster.local:9292 fail\n  # Memcached Negative Tests\n  test_netpol openstack mariadb server memcached.openstack.svc.cluster.local:11211 fail\n  test_netpol openstack rabbitmq server memcached.openstack.svc.cluster.local:11211 fail\n  test_netpol openstack openvswitch openvswitch-vswitchd memcached.openstack.svc.cluster.local:11211 fail\n  test_netpol openstack libvirt libvirt memcached.openstack.svc.cluster.local:11211 fail\n  # Heat Negative Tests\n  test_netpol openstack keystone api heat-api.openstack.svc.cluster.local:8004 fail\n  test_netpol openstack nova os-api heat-api.openstack.svc.cluster.local:8004 fail\n  test_netpol openstack neutron server heat-api.openstack.svc.cluster.local:8004 fail\n  test_netpol openstack glance api heat-api.openstack.svc.cluster.local:8004 fail\n\n# Positive Compute-Kit Tests\n\n  # Positive Mariadb tests\n  test_netpol openstack heat api 
mariadb.openstack.svc.cluster.local:3306 success\n  test_netpol openstack glance api mariadb.openstack.svc.cluster.local:3306 success\n  test_netpol openstack glance api mariadb-server.openstack.svc.cluster.local:3306 success\n\n  # Positive Keystone tests\n  test_netpol openstack heat api keystone-api.openstack.svc.cluster.local:5000 success\n  test_netpol openstack glance api keystone-api.openstack.svc.cluster.local:5000 success\n  test_netpol openstack horizon server keystone-api.openstack.svc.cluster.local:5000 success\n  test_netpol openstack nova os-api keystone-api.openstack.svc.cluster.local:5000 success\n  test_netpol openstack nova compute keystone-api.openstack.svc.cluster.local:5000 success\n  test_netpol openstack neutron l3-agent keystone-api.openstack.svc.cluster.local:5000 success\n  test_netpol openstack ingress server glance-api.openstack.svc.cluster.local:9292 success\n  test_netpol openstack nova os-api glance-api.openstack.svc.cluster.local:9292 success\n  test_netpol openstack nova compute glance-api.openstack.svc.cluster.local:9292 success\n  test_netpol openstack heat api glance-api.openstack.svc.cluster.local:9292 success\n  test_netpol openstack horizon server glance-api.openstack.svc.cluster.local:9292 success\n  test_netpol openstack horizon server heat-api.openstack.svc.cluster.local:8004 success\n  test_netpol openstack horizon server heat-cfn.openstack.svc.cluster.local:8000 success\n  test_netpol openstack heat api heat-api.openstack.svc.cluster.local:8004 success\nfi\n\necho Test Success\n"
  },
  {
    "path": "tools/deployment/common/use-it.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\nDPDK_ENABLED=disabled\nif [[ ${FEATURES//,/ } =~ (^|[[:space:]])dpdk($|[[:space:]]) ]]; then\n    DPDK_ENABLED=enabled\nfi\n\nexport OS_CLOUD=openstack_helm\n\n: ${HEAT_DIR:=\"$(readlink -f ./tools/deployment/common)\"}\n: ${SSH_DIR:=\"${HOME}/.ssh\"}\n\nif [[ -n ${HEAT_DIR} ]]; then\n  OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS=\"${OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS} -v ${HEAT_DIR}:${HEAT_DIR}\"\nfi\n\nif [[ -n ${SSH_DIR} ]]; then\n  OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS=\"${OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS} -v ${SSH_DIR}:${SSH_DIR}\"\nfi\n\nexport OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS\n\n: ${OSH_EXT_NET_NAME:=\"public\"}\n: ${OSH_EXT_SUBNET_NAME:=\"public-subnet\"}\n: ${OSH_EXT_SUBNET:=\"172.24.4.0/24\"}\n: ${OSH_BR_EX_ADDR:=\"172.24.4.1/24\"}\n: ${OSH_ALLOCATION_POOL_START:=\"172.24.4.10\"}\n: ${OSH_ALLOCATION_POOL_END:=\"172.24.4.254\"}\nopenstack stack show \"heat-public-net-deployment\" || \\\n  openstack stack create --wait \\\n    --parameter network_name=${OSH_EXT_NET_NAME} \\\n    --parameter physical_network_name=public \\\n    --parameter subnet_name=${OSH_EXT_SUBNET_NAME} \\\n    --parameter subnet_cidr=${OSH_EXT_SUBNET} \\\n    --parameter subnet_gateway=${OSH_BR_EX_ADDR%/*} \\\n    --parameter allocation_pool_start=${OSH_ALLOCATION_POOL_START} \\\n    --parameter allocation_pool_end=${OSH_ALLOCATION_POOL_END} \\\n    -t 
${HEAT_DIR}/heat-public-net-deployment.yaml \\\n    heat-public-net-deployment\n\n: ${OSH_PRIVATE_SUBNET_POOL:=\"192.168.128.0/20\"}\n: ${OSH_PRIVATE_SUBNET_POOL_NAME:=\"shared-default-subnetpool\"}\n: ${OSH_PRIVATE_SUBNET_POOL_DEF_PREFIX:=\"24\"}\nopenstack stack show \"heat-subnet-pool-deployment\" || \\\n  openstack stack create --wait \\\n    --parameter subnet_pool_name=${OSH_PRIVATE_SUBNET_POOL_NAME} \\\n    --parameter subnet_pool_prefixes=${OSH_PRIVATE_SUBNET_POOL} \\\n    --parameter subnet_pool_default_prefix_length=${OSH_PRIVATE_SUBNET_POOL_DEF_PREFIX} \\\n    -t ${HEAT_DIR}/heat-subnet-pool-deployment.yaml \\\n    heat-subnet-pool-deployment\n\n: ${OSH_EXT_NET_NAME:=\"public\"}\n: ${OSH_VM_KEY_STACK:=\"heat-vm-key\"}\n: ${OSH_PRIVATE_SUBNET:=\"192.168.128.0/24\"}\n\n\nif [[ ${USE_UBUNTU_IMAGE:=\"false\"} == \"true\" ]]; then\n    IMAGE_ID=$(openstack image list -f value | grep -i \"ubuntu\" | head -1 | awk '{ print $1 }')\n    IMAGE_USER=ubuntu\nelse\n    IMAGE_ID=$(openstack image list -f value | grep -i \"cirros\" | head -1 | awk '{ print $1 }')\n    IMAGE_USER=cirros\nfi\n\n# Setup SSH Keypair in Nova\nmkdir -p ${SSH_DIR}\nopenstack keypair show \"${OSH_VM_KEY_STACK}\" || \\\n  openstack keypair create --private-key ${SSH_DIR}/osh_key ${OSH_VM_KEY_STACK}\nsudo chown $(id -un) ${SSH_DIR}/osh_key\nchmod 600 ${SSH_DIR}/osh_key\n\nopenstack stack show \"heat-basic-vm-deployment\" || \\\n  openstack stack create --wait \\\n      --parameter public_net=${OSH_EXT_NET_NAME} \\\n      --parameter image=\"${IMAGE_ID}\" \\\n      --parameter is_ubuntu=${USE_UBUNTU_IMAGE} \\\n      --parameter ssh_key=${OSH_VM_KEY_STACK} \\\n      --parameter cidr=${OSH_PRIVATE_SUBNET} \\\n      --parameter dns_nameserver=${OSH_BR_EX_ADDR%/*} \\\n      --parameter dpdk=${DPDK_ENABLED} \\\n      -t ${HEAT_DIR}/heat-basic-vm-deployment.yaml \\\n      heat-basic-vm-deployment\n\nFLOATING_IP=$(openstack stack output show \\\n    heat-basic-vm-deployment \\\n    floating_ip \\\n    
-f value -c output_value)\n\nINSTANCE_ID=$(openstack stack output show \\\n    heat-basic-vm-deployment \\\n    instance_uuid \\\n    -f value -c output_value)\n\nopenstack server show ${INSTANCE_ID}\n\n# accept diffie-hellman-group1-sha1 algo for SSH (for compatibility with older images)\nsudo tee -a /etc/ssh/ssh_config <<EOF\n    KexAlgorithms +diffie-hellman-group1-sha1\n    HostKeyAlgorithms +ssh-rsa\n    PubkeyAcceptedKeyTypes +ssh-rsa\nEOF\n\n[ ${USE_UBUNTU_IMAGE} == \"true\" ] \\\n  && wait_for_ssh_timeout=$(date -d '+900 sec' +%s) \\\n  || wait_for_ssh_timeout=$(date -d '+300 sec' +%s)\n\nwhile true; do\n    nmap -Pn -p22 ${FLOATING_IP} | awk '$1 ~ /22/ {print $2}' | grep -q 'open' \\\n        && echo \"SSH port is open.\" \\\n        && ssh -o \"StrictHostKeyChecking no\" -i ${SSH_DIR}/osh_key ${IMAGE_USER}@${FLOATING_IP} true \\\n        && echo \"SSH session successfully established\" \\\n        && if [ ${USE_UBUNTU_IMAGE} == \"true\" ]; then\n            ssh -o \"StrictHostKeyChecking no\" -i ${SSH_DIR}/osh_key ${IMAGE_USER}@${FLOATING_IP} cloud-init status | grep -q 'done' \\\n            && echo \"Cloud-init status is done.\"\n        fi \\\n        && break \\\n        || true\n    sleep 30\n    if [ $(date +%s) -gt $wait_for_ssh_timeout ]; then\n        {\n            echo \"Could not establish ssh session to ${IMAGE_USER}@${FLOATING_IP} in time\"\n            openstack console log show ${INSTANCE_ID}\n            exit 1\n        }\n    fi\ndone\n\n# SSH into the VM and check it can reach the outside world\nssh -o \"StrictHostKeyChecking no\" -i ${SSH_DIR}/osh_key ${IMAGE_USER}@${FLOATING_IP} ping -q -c 1 -W 2 ${OSH_BR_EX_ADDR%/*}\n\n# Check the VM can reach the metadata server\nssh -i ${SSH_DIR}/osh_key ${IMAGE_USER}@${FLOATING_IP} curl --verbose --connect-timeout 5 169.254.169.254\n\n# Check the VM can reach the keystone server\nssh -i ${SSH_DIR}/osh_key ${IMAGE_USER}@${FLOATING_IP} curl --verbose --connect-timeout 5 
keystone.openstack-helm.org\n\n# Check to see if cinder has been deployed, if it has then perform a volume attach.\nif openstack service list -f value -c Type | grep -q \"^volume\"; then\n  # Get the devices that are present on the instance\n  DEVS_PRE_ATTACH=$(mktemp)\n  ssh -i ${SSH_DIR}/osh_key ${IMAGE_USER}@${FLOATING_IP} lsblk > ${DEVS_PRE_ATTACH}\n\n  openstack stack show \"heat-vm-volume-attach\" || \\\n  # Create and attach a block device to the instance\n    openstack stack create --wait \\\n      --parameter instance_uuid=${INSTANCE_ID} \\\n      -t ${HEAT_DIR}/heat-vm-volume-attach.yaml \\\n      heat-vm-volume-attach\n\n  # Get the devices that are present on the instance\n  DEVS_POST_ATTACH=$(mktemp)\n  ssh -i ${SSH_DIR}/osh_key ${IMAGE_USER}@${FLOATING_IP} lsblk > ${DEVS_POST_ATTACH}\n\n  # Check that we have the expected number of extra devices on the instance post attach\n  if ! [ \"$(comm -13 ${DEVS_PRE_ATTACH} ${DEVS_POST_ATTACH} | wc -l)\" -eq \"1\" ]; then\n    echo \"Volume not successfully attached\"\n    exit 1\n  fi\nfi\n"
  },
  {
    "path": "tools/deployment/component/aodh/aodh.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_AODH:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c aodh ${FEATURES})\"}\n\n#NOTE: Wait for deploy\nhelm upgrade --install aodh ${OSH_HELM_REPO}/aodh \\\n    --namespace=openstack \\\n    --set pod.replicas.api=2 \\\n    --set pod.replicas.evaluator=2 \\\n    --set pod.replicas.listener=2 \\\n    --set pod.replicas.notifier=2 \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_AODH}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\n#NOTE: Validate Deployment info\nexport OS_CLOUD=openstack_helm\nopenstack service list\n"
  },
  {
    "path": "tools/deployment/component/barbican/barbican.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_BARBICAN:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c barbican ${FEATURES})\"}\n: ${RUN_HELM_TESTS:=\"yes\"}\n\n#NOTE: Deploy command\nhelm upgrade --install barbican ${OSH_HELM_REPO}/barbican \\\n    --namespace=openstack \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_BARBICAN}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\n# Run helm test\nif [ \"x${RUN_HELM_TESTS}\" != \"xno\" ]; then\n    ./tools/deployment/common/run-helm-tests.sh barbican\nfi\n"
  },
  {
    "path": "tools/deployment/component/blazar/blazar.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_BLAZAR:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c blazar ${FEATURES})\"}\n: ${BLAZAR_RELEASE_NAME:=\"blazar\"}\n: ${BLAZAR_NAMESPACE:=\"openstack\"}\n\n#NOTE: Wait for deploy\n\necho \"Deploying OpenStack Blazar\"\n\nhelm upgrade --install ${BLAZAR_RELEASE_NAME} ${OSH_HELM_REPO}/blazar \\\n    --namespace ${BLAZAR_NAMESPACE} \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_BLAZAR}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods ${BLAZAR_NAMESPACE}\n\necho \"OpenStack Blazar deployment complete.\"\n\n#NOTE: Validate Deployment info\nexport OS_CLOUD=openstack_helm\nopenstack service list\n\n# Run helm test\nif [ \"x${RUN_HELM_TESTS}\" != \"xno\" ]; then\n    ./tools/deployment/common/run-helm-tests.sh blazar\nfi\n"
  },
  {
    "path": "tools/deployment/component/blazar/blazar_smoke_test.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -ex\n\nexport OS_CLOUD=openstack_helm\n\nBLAZAR_DIR=\"$(readlink -f ./tools/deployment/component/blazar)\"\nSSH_DIR=\"${HOME}/.ssh\"\n\nOPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS=\"${OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS} -v ${BLAZAR_DIR}:${BLAZAR_DIR} -v ${SSH_DIR}:${SSH_DIR}\"\nexport OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS\n\necho \"Test: Starting the process to delete all existing Blazar leases, if any\"\n\nlease_ids=$(openstack reservation lease list -c id -f value)\nsleep 2\n\n# Check if the list of leases is empty.\nif [ -z \"$lease_ids\" ]; then\n  echo \"Test: No leases found to delete\"\nelse\n  echo \"Test: The following lease IDs will be deleted:\"\n  echo \"$lease_ids\"\n  echo \"-------------------------------------\"\n\n  while IFS= read -r lease_id; do\n    echo \"Test: Deleting lease with ID: $lease_id\"\n    openstack reservation lease delete \"$lease_id\"\n    sleep 2\n    echo \"Test: Lease $lease_id deleted.\"\n  done <<< \"$lease_ids\"\n\n  echo \"-------------------------------------\"\n  echo \"Test: All Blazar leases have been successfully deleted\"\nfi\n\necho \"Test: Starting the process to delete all existing Blazar hosts, if any\"\n\nopenstack host list\nsleep 2\nopenstack reservation host list\nsleep 2\n\nhost_ids=$(openstack reservation host list -c id -f value)\nsleep 2\n\n# Check if the list of hosts is empty.\nif [ -z 
\"$host_ids\" ]; then\n  echo \"Test: No hosts found to delete\"\nelse\n  echo \"Test: The following host IDs will be deleted:\"\n  echo \"$host_ids\"\n  echo \"-------------------------------------\"\n\n  while IFS= read -r host_id; do\n\n    # Get the list of servers on the specified host\n    SERVER_LIST=$(openstack server list --host \"$host_id\" -f value -c id)\n    sleep 2\n\n    # Check if any servers were found\n    if [ -z \"$SERVER_LIST\" ]; then\n      echo \"No servers found on host '$host_id'\"\n    else\n      # Delete all servers on the host\n      echo \"Deleting servers on host '$host_id'\"\n      for SERVER_ID in $SERVER_LIST; do\n        echo \"Deleting server $SERVER_ID\"\n        openstack server delete \"$SERVER_ID\"\n      done\n      echo \"All servers on host '$host_id' have been deleted\"\n    fi\n\n    echo \"Test: Deleting host with ID: $host_id\"\n    openstack reservation host delete \"$host_id\"\n    sleep 2\n    echo \"Test: Host $host_id deleted\"\n  done <<< \"$host_ids\"\n\n  echo \"-------------------------------------\"\n  echo \"Test: All Blazar hosts have been successfully deleted\"\nfi\n\necho \"Test: list all the services\"\nopenstack service list\nsleep 2\n\necho \"Test: list all the endpoints\"\nopenstack endpoint list\nsleep 2\n\necho \"Test: list all the hypervisors\"\nopenstack hypervisor list\nsleep 2\n\necho \"Extract the first available compute host name from the list of hosts\"\nFIRST_COMPUTE_HOST=$(openstack host list | grep 'compute' | awk '{print $2}' | head -n 1)\nsleep 2\n\n# A simple check to see if a host name was successfully found.\nif [ -z \"$FIRST_COMPUTE_HOST\" ]; then\n    echo \"Error: No compute host found in the list\"\n    exit 1\nelse\n    echo \"The first compute host found is: $FIRST_COMPUTE_HOST\"\nfi\nsleep 2\n\n# Set a variable for the aggregate name\nAGGREGATE_NAME=\"freepool\"\n\necho \"Test: Checking if aggregate '${AGGREGATE_NAME}' already exists\"\nAGGREGATE_FOUND=$(openstack aggregate 
list | grep \" ${AGGREGATE_NAME} \" | cut -d '|' -f 3 | tr -d ' ' 2>/dev/null || true)\nsleep 2\n\n# Check if the AGGREGATE_FOUND variable is empty.\nif [ -z \"$AGGREGATE_FOUND\" ]; then\n  echo \"Test: Aggregate '${AGGREGATE_NAME}' not found, Creating it now\"\n  openstack aggregate create \"${AGGREGATE_NAME}\"\n  sleep 5\n  # Check the exit status of the previous command.\n  if [ $? -eq 0 ]; then\n    echo \"Test: Aggregate '${AGGREGATE_NAME}' created successfully\"\n  else\n    echo \"Test: Failed to create aggregate '${AGGREGATE_NAME}'\"\n  fi\nelse\n  echo \"Test: Aggregate '${AGGREGATE_NAME}' already exists\"\nfi\nsleep 2\n\necho \"Test: list all the aggregates after creating/checking freepool aggregate\"\nopenstack aggregate list\nsleep 2\n\necho \"Test: Add host into the Blazar freepool\"\nopenstack reservation host create $FIRST_COMPUTE_HOST\nsleep 5\n\necho \"Test: Add extra capabilities to host to add other properties\"\nopenstack reservation host set --extra gpu=True $FIRST_COMPUTE_HOST\nsleep 2\n\necho \"Test: list hosts in the blazar freepool after adding a host\"\nopenstack reservation host list\nsleep 2\n\n# Get the current date in YYYY-MM-DD format, generate start and end dates\ncurrent_date=$(date +%Y-%m-%d)\nstart_date=$(date -d \"$current_date + 1 day\" +%Y-%m-%d\\ 12:00)\nend_date=$(date -d \"$current_date + 2 day\" +%Y-%m-%d\\ 12:00)\n\necho \"Test: Create a lease (compute host reservation)\"\nopenstack reservation lease create \\\n --reservation resource_type=physical:host,min=1,max=1,hypervisor_properties='[\">=\", \"$vcpus\", \"2\"]' \\\n --start-date \"$start_date\" \\\n --end-date \"$end_date\" \\\n lease-test-comp-host-res\nsleep 5\n\necho \"Test: list leases after creating a lease\"\nopenstack reservation lease list\nsleep 2\n\necho \"Test: list projects\"\nopenstack project list\nsleep 2\n\necho \"Test: list flavors\"\nopenstack flavor list\nsleep 2\n\necho \"Test: list images\"\nopenstack image list\nsleep 2\n\necho \"Test: list 
networks\"\nopenstack network list\nsleep 2\n\n# Get the flavor ID for m1.tiny\nFLAVOR_ID=$(openstack flavor show m1.tiny -f value -c id)\nsleep 2\n\n# Get the image ID for Cirros 0.6.2 64-bit\nIMAGE_ID=$(openstack image show \"Cirros 0.6.2 64-bit\" -f value -c id)\nsleep 2\n\n# --- Network ---\n# Check if a network named \"net1\" exists\nif ! openstack network show net1 &> /dev/null; then\n  echo \"Network 'net1' not found. Creating now\"\n  sleep 2\n  openstack network create net1\n  sleep 2\nelse\n  echo \"Network 'net1' already exists.\"\nfi\n\nNETWORK_ID=$(openstack network show net1 -f value -c id)\nsleep 2\n\n# --- Subnet ---\n# Check if a subnet named \"subnet1\" exists\nif ! openstack subnet show subnet1 &> /dev/null; then\n  echo \"Subnet 'subnet1' not found. Creating now\"\n  sleep 2\n  openstack subnet create \\\n    --network \"$NETWORK_ID\" \\\n    --subnet-range 10.0.0.0/24 \\\n    --allocation-pool start=10.0.0.2,end=10.0.0.254 \\\n    --gateway 10.0.0.1 \\\n    subnet1\n    sleep 2\nelse\n  echo \"Subnet 'subnet1' already exists.\"\nfi\n\nSUBNET_ID=$(openstack subnet show subnet1 -f value -c id)\nsleep 2\n\n# --- Router ---\n# Check if a router named \"router1\" exists\nif ! openstack router show router1 &> /dev/null; then\n  sleep 2\n  echo \"Router 'router1' not found. 
Creating now\"\n  openstack router create router1\n  sleep 2\n  ROUTER_ID=$(openstack router show router1 -f value -c id)\n  sleep 2\n  openstack router add subnet \"$ROUTER_ID\" \"$SUBNET_ID\"\n  sleep 2\nelse\n  echo \"Router 'router1' already exists.\"\nfi\n\necho \"Test: get the lease ID\"\nLEASE_ID=$(openstack reservation lease list | grep \"lease-test-comp-host-res\" | awk '{print $2}')\nsleep 2\n\necho \"Test: get the reservation ID\"\n# Check if the lease ID was found\nif [ -z \"$LEASE_ID\" ]; then\n  echo \"Error: Lease 'lease-test-comp-host-res' not found.\"\nelse\n  RESERVATION_ID=$(openstack reservation lease show \"$LEASE_ID\" | grep -A 100 'reservations' | sed -n 's/.*\"id\": \"\\([^\"]*\\)\".*/\\1/p')\n  sleep 2\n  echo \"Test: RESERVATION ID: $RESERVATION_ID\"\nfi\n\necho \"Test: list servers\"\nopenstack server list\nsleep 2\n\nif [ -n \"$RESERVATION_ID\" ]; then\n  echo \"Test: Create a server with the reservation hint\"\n  openstack server create \\\n    --flavor \"$FLAVOR_ID\" \\\n    --image \"$IMAGE_ID\" \\\n    --network \"$NETWORK_ID\" \\\n    --hint reservation=\"$RESERVATION_ID\" \\\n    server_test_blazar_with_reservation\n  sleep 60\n\n  echo \"Test: list servers after creating a server with reservation\"\n  openstack server list\n  sleep 2\nfi\n\necho \"Test: delete the created servers\"\n# Get the list of servers and delete them\nSERVER_LIST=$(openstack server list -f value -c id)\nsleep 2\n\n# Check if any servers were found\nif [ -z \"$SERVER_LIST\" ]; then\n  echo \"No servers found\"\nelse\n  # Delete the servers\n  for SERVER_ID in $SERVER_LIST; do\n    echo \"Deleting server: $SERVER_ID\"\n    openstack server delete \"$SERVER_ID\"\n    sleep 5\n  done\n  echo \"All servers on host '$host_id' have been deleted\"\nfi\n\necho \"Test: list servers after deleting\"\nopenstack server list\nsleep 2\n\necho \"Test: Starting the process to delete all Blazar leases\"\n\nlease_ids=$(openstack reservation lease list -c id -f value)\nsleep 
2\n\n# Check if the list of leases is empty.\nif [ -z \"$lease_ids\" ]; then\n  echo \"Test: No leases found to delete\"\nelse\n  echo \"Test: The following lease IDs will be deleted:\"\n  echo \"$lease_ids\"\n  echo \"-------------------------------------\"\n\n  while IFS= read -r lease_id; do\n    echo \"Test: Deleting lease with ID: $lease_id\"\n    openstack reservation lease delete \"$lease_id\"\n    sleep 2\n    echo \"Test: Lease $lease_id deleted.\"\n  done <<< \"$lease_ids\"\n\n  echo \"-------------------------------------\"\n  echo \"Test: All Blazar leases have been successfully deleted\"\nfi\n\necho \"Test: Starting the process to delete all Blazar hosts\"\n\nhost_ids=$(openstack reservation host list -c id -f value)\nsleep 2\n\n# Check if the list of hosts is empty.\nif [ -z \"$host_ids\" ]; then\n  echo \"Test: No hosts found to delete\"\nelse\n  echo \"Test: The following host IDs will be deleted:\"\n  echo \"$host_ids\"\n  echo \"-------------------------------------\"\n\n  while IFS= read -r host_id; do\n    echo \"Test: Deleting host with ID: $host_id\"\n    openstack reservation host delete \"$host_id\"\n    sleep 2\n    echo \"Test: Host $host_id deleted\"\n  done <<< \"$host_ids\"\n\n  echo \"-------------------------------------\"\n  echo \"Test: All Blazar hosts have been successfully deleted\"\nfi\n\nexit 0\n"
  },
  {
    "path": "tools/deployment/component/ceilometer/ceilometer.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_CEILOMETER:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c ceilometer ${FEATURES})\"}\n\n#NOTE: Wait for deploy\nhelm upgrade --install ceilometer ${OSH_HELM_REPO}/ceilometer \\\n  --namespace=openstack \\\n  --set pod.replicas.api=2 \\\n  --set pod.replicas.central=2 \\\n  --set pod.replicas.notification=2 \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_CEILOMETER}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\n#NOTE: Validate Deployment info\nexport OS_CLOUD=openstack_helm\nopenstack service list\n"
  },
  {
    "path": "tools/deployment/component/cinder/cinder.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_CINDER:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c cinder ${FEATURES})\"}\n: ${RUN_HELM_TESTS:=\"yes\"}\n\n#NOTE: Deploy command\ntee /tmp/cinder.yaml <<EOF\nconf:\n  ceph:\n    pools:\n      backup:\n        replication: 1\n        crush_rule: replicated_rule\n        chunk_size: 8\n        app_name: cinder-backup\n      # default pool used by rbd1 backend\n      cinder.volumes:\n        replication: 1\n        crush_rule: replicated_rule\n        chunk_size: 8\n        app_name: cinder-volume\n      # secondary pool used by rbd2 backend\n      cinder.volumes.gold:\n        replication: 1\n        crush_rule: replicated_rule\n        chunk_size: 8\n        app_name: cinder-volume\n  backends:\n    # add an extra storage backend same values as rbd1 (see\n    # cinder/values.yaml) except for volume_backend_name and rbd_pool\n    rbd2:\n      volume_driver: cinder.volume.drivers.rbd.RBDDriver\n      volume_backend_name: rbd2\n      rbd_pool: cinder.volumes.gold\n      rbd_ceph_conf: \"/etc/ceph/ceph.conf\"\n      rbd_flatten_volume_from_snapshot: false\n      report_discard_supported: true\n      rbd_max_clone_depth: 5\n      
rbd_store_chunk_size: 4\n      rados_connect_timeout: -1\n      rbd_user: cinder\n      rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337\nEOF\nhelm upgrade --install cinder ${OSH_HELM_REPO}/cinder \\\n  --namespace=openstack \\\n  --values=/tmp/cinder.yaml \\\n  --timeout=600s \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_CINDER}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\n#NOTE: Validate Deployment info\nexport OS_CLOUD=openstack_helm\nopenstack service list\nsleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx\nopenstack volume type list\nopenstack volume type list --default\n\n# Delete the test pod if it still exists\nkubectl delete pods -l application=cinder,release_group=cinder,component=test --namespace=openstack --ignore-not-found\n\nif [ \"x${RUN_HELM_TESTS}\" != \"xno\" ]; then\n    ./tools/deployment/common/run-helm-tests.sh cinder\nfi\n"
  },
  {
    "path": "tools/deployment/component/cloudkitty/cloudkitty.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_CLOUDKITTY:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-}  -p ${OSH_VALUES_OVERRIDES_PATH} -c cloudkitty ${FEATURES})\"}\n: ${RUN_HELM_TESTS:=\"no\"}\n\n#NOTE: Deploy command\nhelm upgrade --install cloudkitty ${OSH_HELM_REPO}/cloudkitty \\\n    --namespace=openstack \\\n    ${OSH_EXTRA_HELM_ARGS} \\\n    ${OSH_EXTRA_HELM_ARGS_CLOUDKITTY}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack 1800\n\n#NOTE: Validate Deployment\nexport OS_CLOUD=openstack_helm\nopenstack service list\n\nopenstack rating module list\n"
  },
  {
    "path": "tools/deployment/component/common/ldap.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_LDAP:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c ldap ${FEATURES})\"}\n: ${NAMESPACE:=openstack}\n\n#NOTE: Deploy command\ntee /tmp/ldap.yaml <<EOF\npod:\n  replicas:\n    server: 1\nbootstrap:\n  enabled: true\nstorage:\n  pvc:\n    enabled: false\nEOF\nhelm upgrade --install ldap ${OSH_HELM_REPO}/ldap \\\n    --namespace=${NAMESPACE} \\\n    --values=/tmp/ldap.yaml \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_LDAP}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods ${NAMESPACE}\n"
  },
  {
    "path": "tools/deployment/component/common/memcached.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_MEMCACHED:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c memcached ${FEATURES})\"}\n: ${NAMESPACE:=openstack}\n\n#NOTE: Deploy command\nhelm upgrade --install memcached ${OSH_HELM_REPO}/memcached \\\n    --namespace=${NAMESPACE} \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_MEMCACHED}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods ${NAMESPACE}\n"
  },
  {
    "path": "tools/deployment/component/common/openstack.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\nexport OS_CLOUD=openstack_helm\n: \"${RUN_HELM_TESTS:=\"no\"}\"\n: \"${CEPH_ENABLED:=\"false\"}\"\n: \"${OSH_EXTRA_HELM_ARGS:=\"\"}\"\nrelease=openstack\nnamespace=$release\n\n\n: ${GLANCE_BACKEND:=\"pvc\"}\n\n#NOTE: Deploy neutron\ntee /tmp/neutron.yaml << EOF\nneutron:\n  release_group: neutron\n  enabled: true\n  network:\n    interface:\n      tunnel: null\n  conf:\n    neutron:\n      DEFAULT:\n        l3_ha: False\n        max_l3_agents_per_router: 1\n        l3_ha_network_type: vxlan\n        dhcp_agents_per_network: 1\n    # provider1 is a tap interface used by default in the test env\n    # we create this interface while setting up the test env\n    auto_bridge_add:\n      br-ex: provider1\n    plugins:\n      ml2_conf:\n        ml2_type_flat:\n          flat_networks: public\n      openvswitch_agent:\n        agent:\n          tunnel_types: vxlan\n        ovs:\n          bridge_mappings: public:br-ex\n      linuxbridge_agent:\n        linux_bridge:\n          bridge_mappings: public:br-ex\n  labels:\n    agent:\n      l3:\n        node_selector_key: l3-agent\n        node_selector_value: enabled\nEOF\n## includes second argument 'subchart' to indicate a different path\n: 
${OSH_EXTRA_HELM_ARGS_MARIADB:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c openstack -s mariadb ${FEATURES})\"}\n: ${OSH_EXTRA_HELM_ARGS_RABBITMQ:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c openstack -s rabbitmq ${FEATURES})\"}\n: ${OSH_EXTRA_HELM_ARGS_MEMCACHED:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c openstack -s memcached ${FEATURES})\"}\n: ${OSH_EXTRA_HELM_ARGS_KEYSTONE:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c openstack -s keystone ${FEATURES})\"}\n: ${OSH_EXTRA_HELM_ARGS_HEAT:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c openstack -s heat ${FEATURES})\"}\n: ${OSH_EXTRA_HELM_ARGS_GLANCE:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c openstack -s glance ${FEATURES})\"}\n: ${OSH_EXTRA_HELM_ARGS_OPENVSWITCH:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c openstack -s openvswitch ${FEATURES})\"}\n: ${OSH_EXTRA_HELM_ARGS_LIBVIRT:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c openstack -s libvirt ${FEATURES})\"}\n: ${OSH_EXTRA_HELM_ARGS_NOVA:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c openstack -s nova ${FEATURES})\"}\n: ${OSH_EXTRA_HELM_ARGS_PLACEMENT:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c openstack -s placement ${FEATURES})\"}\n: ${OSH_EXTRA_HELM_ARGS_NEUTRON:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c openstack -s neutron ${FEATURES})\"}\n: ${OSH_EXTRA_HELM_ARGS_HORIZON:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c openstack -s horizon ${FEATURES})\"}\n\nif [ 
\"x$(systemd-detect-virt)\" != \"xnone\" ]; then\n  echo 'OSH is being deployed in virtualized environment, using qemu for nova'\n  OSH_EXTRA_HELM_VIRT_ARGS=( \"--set nova.conf.nova.libvirt.virt_type=qemu\" \\\n                        \"--set nova.conf.nova.libvirt.cpu_mode=none\" )\nfi\n\n# Check if Hugepages is enabled\nhgpgs_available=\"$(awk '/HugePages_Total/{print $2}' /proc/meminfo)\"\nif [ \"x$hgpgs_available\" != \"x0\" ]; then\n  OSH_EXTRA_HELM_ARGS_LIBVIRT_CGROUP=\"--set libvirt.conf.kubernetes.cgroup=.\"\nfi\n\nhelm dependency update openstack\n\necho \"helm installing openstack...\"\nhelm upgrade --install $release ${OSH_HELM_REPO}/openstack \\\n  ${OSH_EXTRA_HELM_ARGS_MARIADB} \\\n  ${OSH_EXTRA_HELM_ARGS_RABBITMQ} \\\n  ${OSH_EXTRA_HELM_ARGS_MEMCACHED} \\\n  ${OSH_EXTRA_HELM_ARGS_KEYSTONE} \\\n  ${OSH_EXTRA_HELM_ARGS_HEAT} \\\n  ${OSH_EXTRA_HELM_ARGS_HORIZON} \\\n  ${OSH_EXTRA_HELM_ARGS_GLANCE} \\\n  ${OSH_EXTRA_HELM_ARGS_OPENVSWITCH} \\\n  ${OSH_EXTRA_HELM_ARGS_LIBVIRT} \\\n  ${OSH_EXTRA_HELM_ARGS_NOVA} \\\n  ${OSH_EXTRA_HELM_ARGS_PLACEMENT} \\\n  ${OSH_EXTRA_HELM_ARGS_NEUTRON} \\\n  ${OSH_EXTRA_HELM_ARGS_LIBVIRT_CGROUP} \\\n  ${OSH_EXTRA_HELM_VIRT_ARGS} \\\n  ${OSH_EXTRA_HELM_ARGS} \\\n  --set glance.conf.glance.keystone_authtoken.memcache_secret_key=\"$(openssl rand -hex 64)\" \\\n  --set glance.storage=${GLANCE_BACKEND} \\\n  --set nova.bootstrap.wait_for_computes.enabled=true \\\n  --set libvirt.conf.ceph.enabled=${CEPH_ENABLED} \\\n  --set nova.conf.ceph.enabled=${CEPH_ENABLED} \\\n  --values=/tmp/neutron.yaml \\\n  --set mariadb.pod.replicas.server=1 \\\n  --set mariadb.volume.enabled=false \\\n  --set mariadb.volume.use_local_path_for_single_pod_cluster.enabled=true \\\n  --set rabbitmq.pod.replicas.server=1 \\\n  --set rabbitmq.volume.enabled=false \\\n  --set rabbitmq.volume.use_local_path.enabled=true \\\n  --namespace=$namespace \\\n  --timeout=1200s\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods $namespace 1800\n\n# list pods and 
services\necho \"------------------ List kube-system pods and services ------------\"\nkubectl -n kube-system get pods\nkubectl -n kube-system get services\n\necho\necho \"----------------- List openstack pods and services ---------------\"\nkubectl -n openstack get pods\nkubectl -n openstack get services\n\n#NOTE: Validate Deployment info\nopenstack service list\nsleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx\nopenstack compute service list\nopenstack network agent list\nopenstack hypervisor list\n"
  },
  {
    "path": "tools/deployment/component/common/rabbitmq.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_RABBITMQ:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c rabbitmq ${FEATURES})\"}\n: ${NAMESPACE:=openstack}\n\n#NOTE: Deploy command\nhelm upgrade --install rabbitmq ${OSH_HELM_REPO}/rabbitmq \\\n    --namespace=${NAMESPACE} \\\n    --set pod.replicas.server=1 \\\n    --timeout=600s \\\n    ${VOLUME_HELM_ARGS:=\"--set volume.enabled=false --set volume.use_local_path.enabled=true\"} \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_RABBITMQ}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods ${NAMESPACE}\n"
  },
  {
    "path": "tools/deployment/component/compute-kit/compute-kit-sr-iov.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n#NOTE(portdirect): This file is included as an example of how to deploy\n# nova and neutron with ovs and sr-iov active. It will not work without\n# modification for your environment.\n\nset -xe\n\n#NOTE: Pull images and lint chart\nmake pull-images nova\nmake pull-images neutron\n\nSRIOV_DEV1=enp3s0f0\nSRIOV_DEV2=enp66s0f1\nOVSBR=vlan92\n\n#NOTE: Deploy nova\n: ${OSH_EXTRA_HELM_ARGS:=\"\"}\ntee /tmp/nova.yaml << EOF\nnetwork:\n  backend:\n   - openvswitch\n   - sriov\nconf:\n  nova:\n    DEFAULT:\n      debug: True\n      vcpu_pin_set: 4,8,12,16,20,24,28,32,36,40,44,48,52,56,60,5,9,13,17,21,25,29,33,37,41,45,49,53,57,61\n      vif_plugging_is_fatal: False\n      vif_plugging_timeout: 30\n    pci:\n      alias: '{\"name\": \"numa0\", \"capability_type\": \"pci\", \"product_id\": \"10fb\", \"vendor_id\": \"8086\", \"device_type\": \"type-PCI\", \"numa_policy\": \"required\"}'\n      passthrough_whitelist: |\n        [{\"address\": \"0000:03:10.0\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:10.2\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:10.4\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:10.6\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:11.0\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:11.2\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:11.4\", 
\"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:11.6\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:12.0\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:12.2\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:12.4\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:12.6\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:13.0\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:13.2\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:13.4\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:13.6\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:14.0\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:14.2\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:14.4\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:14.6\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:15.0\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:15.2\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:15.4\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:15.6\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:16.0\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:16.2\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:16.4\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:16.6\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:17.0\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:17.2\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:17.4\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:03:17.6\", \"physical_network\": \"physnet1\"}, {\"address\": \"0000:42:10.1\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:10.3\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:10.5\", \"physical_network\": \"physnet2\"}, {\"address\": 
\"0000:42:10.7\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:11.1\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:11.3\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:11.5\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:11.7\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:12.1\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:12.3\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:12.5\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:12.7\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:13.1\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:13.3\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:13.5\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:13.7\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:14.1\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:14.3\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:14.5\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:14.7\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:15.1\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:15.3\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:15.5\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:15.7\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:16.1\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:16.3\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:16.5\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:16.7\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:17.1\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:17.3\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:17.5\", \"physical_network\": \"physnet2\"}, {\"address\": \"0000:42:17.7\", \"physical_network\": \"physnet2\"}]\n    
filter_scheduler:\n      enabled_filters: \"RetryFilter, AvailabilityZoneFilter, RamFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter, PciPassthroughFilter, NUMATopologyFilter, DifferentHostFilter, SameHostFilter\"\nEOF\n\nif [ \"x$(systemd-detect-virt)\" == \"xnone\" ]; then\n  echo 'OSH is not being deployed in virtualized environment'\n  helm upgrade --install nova ./nova \\\n      --namespace=openstack \\\n      --values /tmp/nova.yaml \\\n      ${OSH_EXTRA_HELM_ARGS}\nelse\n  echo 'OSH is being deployed in virtualized environment, using qemu for nova'\n  helm upgrade --install nova ./nova \\\n      --namespace=openstack \\\n      --set conf.nova.libvirt.virt_type=qemu \\\n      --set conf.nova.libvirt.cpu_mode=none \\\n      --values /tmp/nova.yaml \\\n      ${OSH_EXTRA_HELM_ARGS}\nfi\n\n#NOTE: Deploy neutron\ntee /tmp/neutron.yaml << EOF\nnetwork:\n  backend:\n   - openvswitch\n   - sriov\n  interface:\n    tunnel: docker0\n    sriov:\n      - device: ${SRIOV_DEV1}\n        num_vfs: 32\n        promisc: false\n      - device: ${SRIOV_DEV2}\n        num_vfs: 32\n        promisc: false\n  auto_bridge_add:\n    br-ex: null\n    br-physnet3: ${OVSBR}\nconf:\n  neutron:\n    DEFAULT:\n      debug: True\n      l3_ha: False\n      max_l3_agents_per_router: 1\n      l3_ha_network_type: vxlan\n      dhcp_agents_per_network: 1\n  plugins:\n    ml2_conf:\n      ml2:\n        mechanism_drivers: openvswitch,sriovnicswitch,l2population\n      ml2_type_flat:\n        flat_networks: public\n        type_drivers: vlan,flat,vxlan\n        mechanism_drivers: openvswitch,sriovnicswitch,l2population\n        tenant_network_types: vxlan\n      ml2_type_vlan:\n        network_vlan_ranges: physnet1:20:30,physnet2:20:30\n    #NOTE(portdirect): for clarity we include options for all the neutron\n    # backends here.\n    openvswitch_agent:\n      agent:\n        tunnel_types: vxlan\n      ovs:\n        
bridge_mappings: \"public:br-ex,physnet3:br-physnet3\"\n    linuxbridge_agent:\n      linux_bridge:\n        bridge_mappings: \"public:br-ex,physnet1:br-physnet1\"\n    sriov_agent:\n      sriov_nic:\n        physical_device_mappings: physnet1:${SRIOV_DEV1},physnet2:${SRIOV_DEV2}\n        exclude_devices: null\nEOF\nkubectl label node cab24-r820-14 --overwrite=true sriov=enabled\nkubectl label node cab24-r820-15 --overwrite=true sriov=enabled\n\nhelm upgrade --install neutron ./neutron \\\n    --namespace=openstack \\\n    --values=/tmp/neutron.yaml \\\n    ${OSH_EXTRA_HELM_ARGS}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\n#NOTE: Validate Deployment info\nexport OS_CLOUD=openstack_helm\nopenstack service list\nsleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx\nopenstack compute service list\nopenstack network agent list\n\n#NOTE: Exercise the deployment\nopenstack network create test\nNET_ID=$(openstack network show test -f value -c id)\nopenstack subnet create --subnet-range \"172.24.4.0/24\" --network ${NET_ID} test\nopenstack port create --network ${NET_ID} --fixed-ip subnet=test,ip-address=\"172.24.4.10\" --binding-profile vnic_type=direct sriov_port\nPORT_ID=$(openstack port show sriov_port -f value -c id)\n\n# NOTE(portdirect): We do this fancy, and seemingly pointless, footwork to get\n# the full image name for the cirros Image without having to be explicit.\nexport IMAGE_NAME=$(openstack image show -f value -c name \\\n  $(openstack image list -f csv | awk -F ',' '{ print $2 \",\" $1 }' | \\\n    grep \"^\\\"Cirros\" | head -1 | awk -F ',' '{ print $2 }' | tr -d '\"'))\n\nopenstack server create --flavor m1.tiny --image \"${IMAGE_NAME}\" --nic port-id=${PORT_ID} test-sriov\n"
  },
  {
    "path": "tools/deployment/component/compute-kit/compute-kit.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_PLACEMENT:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c placement ${FEATURES})\"}\n: ${OSH_EXTRA_HELM_ARGS_NOVA:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c nova ${FEATURES})\"}\n: ${OSH_EXTRA_HELM_ARGS_NEUTRON:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c neutron ${FEATURES})\"}\n: ${RUN_HELM_TESTS:=\"yes\"}\n\nexport OS_CLOUD=openstack_helm\nCEPH_ENABLED=false\nif openstack service list -f value -c Type | grep -q \"^volume\" && \\\n    openstack volume type list -f value -c Name | grep -q \"rbd\"; then\n  CEPH_ENABLED=true\nfi\n\n#NOTE: Deploy placement\nhelm upgrade --install placement ${OSH_HELM_REPO}/placement --namespace=openstack \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_PLACEMENT}\n\n#NOTE: Deploy nova\ntee /tmp/nova.yaml << EOF\nconf:\n  nova:\n    libvirt:\n      virt_type: qemu\n      cpu_mode: none\n  ceph:\n    enabled: ${CEPH_ENABLED}\nbootstrap:\n  wait_for_computes:\n    enabled: true\njobs:\n  cell_setup:\n    extended_wait:\n      enabled: true\nEOF\n: ${OSH_EXTRA_HELM_ARGS:=\"\"}\nhelm upgrade --install 
nova ${OSH_HELM_REPO}/nova \\\n    --namespace=openstack \\\n    --values=/tmp/nova.yaml \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_NOVA}\n\n#NOTE: Deploy neutron\ntee /tmp/neutron.yaml << EOF\nnetwork:\n  interface:\n    # the CI env overlay interface is used by default\n    # for internal cluster communication\n    tunnel: brvxlan\nconf:\n  neutron:\n    DEFAULT:\n      l3_ha: False\n      max_l3_agents_per_router: 1\n      l3_ha_network_type: vxlan\n      dhcp_agents_per_network: 1\n  # provider1 is a tap interface used by default in the test env\n  # we create this interface while setting up the test env\n  auto_bridge_add:\n    br-ex: provider1\n  plugins:\n    ml2_conf:\n      ml2_type_flat:\n        flat_networks: public\n    openvswitch_agent:\n      agent:\n        tunnel_types: vxlan\n      ovs:\n        bridge_mappings: public:br-ex\n    linuxbridge_agent:\n      linux_bridge:\n        bridge_mappings: public:br-ex\nlabels:\n  agent:\n    l3:\n      node_selector_key: l3-agent\n      node_selector_value: enabled\nEOF\n\nhelm upgrade --install neutron ${OSH_HELM_REPO}/neutron \\\n    --namespace=openstack \\\n    --values=/tmp/neutron.yaml \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_NEUTRON}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack 1200\n\n#NOTE: Validate Deployment info\nexport OS_CLOUD=openstack_helm\nopenstack service list\nsleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx\nopenstack compute service list\nopenstack network agent list\nopenstack hypervisor list\n\nif [ \"x${RUN_HELM_TESTS}\" == \"xno\" ]; then\n    exit 0\nfi\n\n./tools/deployment/common/run-helm-tests.sh nova\n./tools/deployment/common/run-helm-tests.sh neutron\n"
  },
  {
    "path": "tools/deployment/component/compute-kit/libvirt.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\nexport OS_CLOUD=openstack_helm\nCEPH_ENABLED=false\nif openstack service list -f value -c Type | grep -q \"^volume\" && \\\n    openstack volume type list -f value -c Name | grep -q \"rbd\"; then\n  CEPH_ENABLED=true\nfi\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_LIBVIRT:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c libvirt ${FEATURES})\"}\n\n#NOTE: Deploy command\nhelm upgrade --install libvirt ${OSH_HELM_REPO}/libvirt \\\n  --namespace=openstack \\\n  --set conf.ceph.enabled=${CEPH_ENABLED} \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_LIBVIRT}\n\n#NOTE: DO NOT wait for pods are ready, because libvirt depends\n# on neutron ovs agent pods or ovn controller pods\n"
  },
  {
    "path": "tools/deployment/component/compute-kit/openvswitch.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_OPENVSWITCH:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c openvswitch ${FEATURES})\"}\n\n#NOTE: Deploy command\nhelm upgrade --install openvswitch ${OSH_HELM_REPO}/openvswitch \\\n  --namespace=openstack \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_OPENVSWITCH}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n"
  },
  {
    "path": "tools/deployment/component/freezer/freezer.sh",
    "content": "#!/bin/bash\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -ex\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_FREEZER:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c freezer ${FEATURES})\"}\n\n#NOTE: Deploy command\nhelm upgrade --install freezer ${OSH_HELM_REPO}/freezer \\\n  --namespace openstack \\\n  --create-namespace \\\n  --timeout 600s \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_FREEZER}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\necho \"OpenStack Freezer deployment complete.\"\n"
  },
  {
    "path": "tools/deployment/component/freezer/freezer_smoke_test.sh",
    "content": "#!/bin/bash\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -ex\n\n#NOTE: Install freezer client and check if it works\n(\n    cd ${HOME}\n    rm -rf freezer\n    git clone https://opendev.org/openstack/freezer.git -b stable/${OPENSTACK_RELEASE}\n    cd freezer\n    sudo pip install -r requirements.txt\n    sudo python3 setup.py install\n)\n\nunset OS_DOMAIN_NAME\nexport OS_AUTH_URL=http://keystone.openstack-helm.org/v3\nexport OS_PROJECT_NAME=admin\nexport OS_USERNAME=admin\nexport OS_PASSWORD=password\nexport OS_PROJECT_DOMAIN_NAME=default\nexport OS_USER_DOMAIN_NAME=default\nexport OS_IDENTITY_API_VERSION=3\nexport OS_AUTH_VERSION=3\n\nfreezer job-list\n"
  },
  {
    "path": "tools/deployment/component/glance/glance.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_GLANCE:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c glance ${FEATURES})\"}\n: ${RUN_HELM_TESTS:=\"yes\"}\n: ${GLANCE_BACKEND:=\"pvc\"}\n\n#NOTE: Deploy command\ntee /tmp/glance.yaml <<EOF\nstorage: ${GLANCE_BACKEND}\nEOF\n\nhelm upgrade --install glance ${OSH_HELM_REPO}/glance \\\n  --namespace=openstack \\\n  --values=/tmp/glance.yaml \\\n  --timeout=800s \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_GLANCE}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\nexport OS_CLOUD=openstack_helm\nopenstack service list\nsleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx\nopenstack image list\nfor image_id in $(openstack image list -f value -c ID); do\n  openstack image show ${image_id}\ndone\n\nif [ \"x${RUN_HELM_TESTS}\" == \"xno\" ]; then\n    exit 0\nfi\n\n./tools/deployment/common/run-helm-tests.sh glance\n"
  },
  {
    "path": "tools/deployment/component/heat/heat.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_HEAT:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c heat ${FEATURES})\"}\n\n#NOTE: Deploy command\nhelm upgrade --install heat ${OSH_HELM_REPO}/heat \\\n  --namespace=openstack \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_HEAT}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\n#NOTE: Validate Deployment info\nexport OS_CLOUD=openstack_helm\nopenstack service list\nopenstack endpoint list\nsleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx\n\nopenstack orchestration service list\n\nif [[ ${FEATURES//,/ } =~ (^|[[:space:]])tls($|[[:space:]]) ]]; then\n  curl --cacert /etc/openstack-helm/certs/ca/ca.pem -L https://heat.openstack-helm.org\nfi\n"
  },
  {
    "path": "tools/deployment/component/horizon/horizon.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_HORIZON:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c horizon ${FEATURES})\"}\n: ${RUN_HELM_TESTS:=\"yes\"}\n\n#NOTE: Deploy command\nhelm upgrade --install horizon ${OSH_HELM_REPO}/horizon \\\n    --namespace=openstack \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_HORIZON}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\nif [ \"x${RUN_HELM_TESTS}\" != \"xno\" ]; then\n    ./tools/deployment/common/run-helm-tests.sh horizon\nfi\n\nif [[ ${FEATURES//,/ } =~ (^|[[:space:]])tls($|[[:space:]]) ]]; then\n  curl --cacert /etc/openstack-helm/certs/ca/ca.pem -L https://horizon.openstack.svc.cluster.local\nfi\n"
  },
  {
    "path": "tools/deployment/component/keystone/keystone.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_KEYSTONE:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c keystone ${FEATURES})\"}\n: ${RUN_HELM_TESTS:=\"no\"}\n\n#NOTE: Deploy command\nhelm upgrade --install keystone ${OSH_HELM_REPO}/keystone \\\n    --namespace=openstack \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_KEYSTONE}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\nexport OS_CLOUD=openstack_helm\nsleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx\nopenstack endpoint list\n\n#NOTE: Validate feature gate options if required\nif [[ ${FEATURES//,/ } =~ (^|[[:space:]])ldap($|[[:space:]]) ]]; then\n  #NOTE: Do some additional queries here for LDAP\n  openstack domain list\n  openstack user list\n  openstack user list --domain ldapdomain\n\n  openstack group list --domain ldapdomain\n\n  openstack role add --user bob --project admin --user-domain ldapdomain --project-domain default admin\n\n  domain=\"ldapdomain\"\n  domainId=$(openstack domain show ${domain} -f value -c id)\n  token=$(openstack token issue -f value -c id)\n\n  #NOTE: Testing we can auth against the LDAP user\n  unset OS_CLOUD\n  openstack 
--os-auth-url http://keystone.openstack-helm.org/v3 --os-username bob --os-password password --os-user-domain-name ${domain} --os-identity-api-version 3 token issue\n\n  #NOTE: Test the domain specific thing works\n  curl --verbose -X GET \\\n    -H \"Content-Type: application/json\" \\\n    -H \"X-Auth-Token: $token\" \\\n    http://keystone.openstack-helm.org/v3/domains/${domainId}/config\nfi\n\nif [ \"x${RUN_HELM_TESTS}\" != \"xno\" ]; then\n    ./tools/deployment/common/run-helm-tests.sh keystone\nfi\n\nif [[ ${FEATURES//,/ } =~ (^|[[:space:]])tls($|[[:space:]]) ]]; then\n  curl --cacert /etc/openstack-helm/certs/ca/ca.pem -L https://keystone.openstack-helm.org\nfi\n"
  },
  {
    "path": "tools/deployment/component/magnum/magnum.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_MAGNUM:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c magnum ${FEATURES})\"}\n: ${RUN_HELM_TESTS:=\"yes\"}\n\n#NOTE: Deploy command\nhelm upgrade --install magnum ${OSH_HELM_REPO}/magnum \\\n  --namespace=openstack \\\n  --set pod.replicas.api=2 \\\n  --set pod.replicas.conductor=2 \\\n  ${OSH_EXTRA_HELM_ARGS} \\\n  ${OSH_EXTRA_HELM_ARGS_MAGNUM}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\n#NOTE: Validate Deployment info\nexport OS_CLOUD=openstack_helm\nopenstack service list\n"
  },
  {
    "path": "tools/deployment/component/manila/manila.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_MANILA:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-}  -p ${OSH_VALUES_OVERRIDES_PATH} -c manila ${FEATURES})\"}\n: ${RUN_HELM_TESTS:=\"no\"}\n\n#NOTE: Deploy command\nhelm upgrade --install --debug manila ${OSH_HELM_REPO}/manila \\\n    --namespace=openstack \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_MANILA:=}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack 1800\n"
  },
  {
    "path": "tools/deployment/component/mistral/mistral.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_MISTRAL:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c mistral ${FEATURES})\"}\n: ${RUN_HELM_TESTS:=\"yes\"}\n\n#NOTE: Deploy command\nhelm upgrade --install mistral ${OSH_HELM_REPO}/mistral \\\n  --namespace=openstack \\\n  --set pod.replicas.api=2 \\\n  --set pod.replicas.engine=2 \\\n  --set pod.replicas.event_engine=2 \\\n  --set pod.replicas.executor=2 \\\n  ${OSH_EXTRA_HELM_ARGS} \\\n  ${OSH_EXTRA_HELM_ARGS_MISTRAL}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\n#NOTE: Validate Deployment\nexport OS_CLOUD=openstack_helm\nopenstack service list\n\n# Run helm test\nif [ \"x${RUN_HELM_TESTS}\" != \"xno\" ]; then\n    ./tools/deployment/common/run-helm-tests.sh mistral\nfi\n"
  },
  {
    "path": "tools/deployment/component/nfs-provisioner/nfs-provisioner.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_NFS_PROVISIONER:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c nfs-provisioner ${FEATURES})\"}\n: ${STORAGE_CLASS:=\"nfs-provisioner\"}\n: ${NAMESPACE:=nfs}\n\n#NOTE: Deploy command\nhelm upgrade --install --create-namespace nfs-provisioner ${OSH_HELM_REPO}/nfs-provisioner \\\n    --namespace=${NAMESPACE} \\\n    --set storageclass.name=\"${STORAGE_CLASS}\" \\\n    ${OSH_EXTRA_HELM_ARGS_NFS_PROVISIONER}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods ${NAMESPACE}\n"
  },
  {
    "path": "tools/deployment/component/octavia/create_dual_intermediate_CA.sh",
    "content": "#!/bin/bash\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\necho \"!!!!!!!!!!!!!!!Do not use this script for deployments!!!!!!!!!!!!!\"\necho \"Please use the Octavia Certificate Configuration guide:\"\necho \"https://docs.openstack.org/octavia/latest/admin/guides/certificates.html\"\necho \"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n\n# This script produces weak security PKI to save resources in the test gates.\n# It should be modified to use stronger encryption (aes256), better pass\n# phrases, and longer keys (4096).\n# Please see the Octavia Certificate Configuration guide:\n# https://docs.openstack.org/octavia/latest/admin/guides/certificates.html\n\nset -x -e\n\nOPENSSL_CONF=\"$(readlink -f \"$(dirname \"$0\")\")\"/openssl.cnf\n\nCA_PATH=dual_ca\n\nrm -rf $CA_PATH\nmkdir $CA_PATH\nchmod 700 $CA_PATH\ncd $CA_PATH\n\nmkdir -p etc/octavia/certs\nchmod 700 etc/octavia/certs\n\n###### Client Root CA\nmkdir client_ca\ncd client_ca\nmkdir certs crl newcerts private\nchmod 700 private\ntouch index.txt\necho 1000 > serial\n\n# Create the client CA private key\nopenssl genpkey -algorithm RSA -out private/ca.key.pem -aes-128-cbc -pass pass:not-secure-passphrase\nchmod 400 private/ca.key.pem\n\n# Create the client CA root certificate\nopenssl req -config ${OPENSSL_CONF} -key private/ca.key.pem -new -x509 -sha256 -extensions v3_ca -days 7300 -out certs/ca.cert.pem -subj 
\"/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ClientRootCA\" -passin pass:not-secure-passphrase\n\n###### Client Intermediate CA\nmkdir intermediate_ca\nmkdir intermediate_ca/certs intermediate_ca/crl intermediate_ca/newcerts intermediate_ca/private\nchmod 700 intermediate_ca/private\ntouch intermediate_ca/index.txt\necho 1000 > intermediate_ca/serial\n\n# Create the client intermediate CA private key\nopenssl genpkey -algorithm RSA -out intermediate_ca/private/intermediate.ca.key.pem -aes-128-cbc -pass pass:not-secure-passphrase\nchmod 400 intermediate_ca/private/intermediate.ca.key.pem\n\n# Create the client intermediate CA certificate signing request\nopenssl req -config ${OPENSSL_CONF} -key intermediate_ca/private/intermediate.ca.key.pem -new -sha256 -out intermediate_ca/client_intermediate.csr -subj \"/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ClientIntermediateCA\" -passin pass:not-secure-passphrase\n\n# Create the client intermediate CA certificate\nopenssl ca -config ${OPENSSL_CONF} -name CA_intermediate -extensions v3_intermediate_ca -days 3650 -notext -md sha256 -in intermediate_ca/client_intermediate.csr -out intermediate_ca/certs/intermediate.cert.pem -passin pass:not-secure-passphrase -batch\n\n# Create the client CA certificate chain\ncat intermediate_ca/certs/intermediate.cert.pem certs/ca.cert.pem > intermediate_ca/ca-chain.cert.pem\n\n###### Create the client key and certificate\nopenssl genpkey -algorithm RSA -out intermediate_ca/private/controller.key.pem -aes-128-cbc -pass pass:not-secure-passphrase\nchmod 400 intermediate_ca/private/controller.key.pem\n\n# Create the client controller certificate signing request\nopenssl req -config ${OPENSSL_CONF} -key intermediate_ca/private/controller.key.pem -new -sha256 -out intermediate_ca/controller.csr -subj \"/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=OctaviaController\" -passin pass:not-secure-passphrase\n\n# Create the client controller certificate\nopenssl ca -config 
${OPENSSL_CONF} -name CA_intermediate -extensions usr_cert -days 1825 -notext -md sha256 -in intermediate_ca/controller.csr -out intermediate_ca/certs/controller.cert.pem -passin pass:not-secure-passphrase -batch\n\n# Build the concatenated client cert and key\nopenssl rsa -in intermediate_ca/private/controller.key.pem -out intermediate_ca/private/client.cert-and-key.pem -passin pass:not-secure-passphrase\n\ncat intermediate_ca/certs/controller.cert.pem >> intermediate_ca/private/client.cert-and-key.pem\n\n# We are done with the client CA\ncd ..\n\n###### Stash the octavia default client CA cert files\ncp client_ca/intermediate_ca/ca-chain.cert.pem etc/octavia/certs/client_ca.cert.pem\nchmod 444 etc/octavia/certs/client_ca.cert.pem\ncp client_ca/intermediate_ca/private/client.cert-and-key.pem etc/octavia/certs/client.cert-and-key.pem\nchmod 600 etc/octavia/certs/client.cert-and-key.pem\n\n###### Server Root CA\nmkdir server_ca\ncd server_ca\nmkdir certs crl newcerts private\nchmod 700 private\ntouch index.txt\necho 1000 > serial\n\n# Create the server CA private key\nopenssl genpkey -algorithm RSA -out private/ca.key.pem -aes-128-cbc -pass pass:not-secure-passphrase\nchmod 400 private/ca.key.pem\n\n# Create the server CA root certificate\nopenssl req -config ${OPENSSL_CONF} -key private/ca.key.pem -new -x509 -sha256 -extensions v3_ca -days 7300 -out certs/ca.cert.pem -subj \"/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ServerRootCA\" -passin pass:not-secure-passphrase\n\n###### Server Intermediate CA\nmkdir intermediate_ca\nmkdir intermediate_ca/certs intermediate_ca/crl intermediate_ca/newcerts intermediate_ca/private\nchmod 700 intermediate_ca/private\ntouch intermediate_ca/index.txt\necho 1000 > intermediate_ca/serial\n\n# Create the server intermediate CA private key\nopenssl genpkey -algorithm RSA -out intermediate_ca/private/intermediate.ca.key.pem -aes-128-cbc -pass pass:not-secure-passphrase\nchmod 400 
intermediate_ca/private/intermediate.ca.key.pem\n\n# Create the server intermediate CA certificate signing request\nopenssl req -config ${OPENSSL_CONF} -key intermediate_ca/private/intermediate.ca.key.pem -new -sha256 -out intermediate_ca/server_intermediate.csr -subj \"/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ServerIntermediateCA\" -passin pass:not-secure-passphrase\n\n# Create the server intermediate CA certificate\nopenssl ca -config ${OPENSSL_CONF} -name CA_intermediate -extensions v3_intermediate_ca -days 3650 -notext -md sha256 -in intermediate_ca/server_intermediate.csr -out intermediate_ca/certs/intermediate.cert.pem -passin pass:not-secure-passphrase -batch\n\n# Create the server CA certificate chain\ncat intermediate_ca/certs/intermediate.cert.pem certs/ca.cert.pem > intermediate_ca/ca-chain.cert.pem\n\n# We are done with the server CA\ncd ..\n\n###### Stash the octavia default server CA cert files\ncp server_ca/intermediate_ca/ca-chain.cert.pem etc/octavia/certs/server_ca-chain.cert.pem\nchmod 444 etc/octavia/certs/server_ca-chain.cert.pem\ncp server_ca/intermediate_ca/certs/intermediate.cert.pem etc/octavia/certs/server_ca.cert.pem\nchmod 400 etc/octavia/certs/server_ca.cert.pem\ncp server_ca/intermediate_ca/private/intermediate.ca.key.pem etc/octavia/certs/server_ca.key.pem\nchmod 400 etc/octavia/certs/server_ca.key.pem\n\n##### Validate the Octavia PKI files\nset +x\necho \"################# Verifying the Octavia files ###########################\"\nopenssl verify -CAfile etc/octavia/certs/client_ca.cert.pem etc/octavia/certs/client.cert-and-key.pem\nopenssl verify -CAfile etc/octavia/certs/server_ca-chain.cert.pem etc/octavia/certs/server_ca.cert.pem\n\n# We are done, stop enforcing shell errexit\nset +e\n\necho \"!!!!!!!!!!!!!!!Do not use this script for deployments!!!!!!!!!!!!!\"\necho \"Please use the Octavia Certificate Configuration guide:\"\necho \"https://docs.openstack.org/octavia/latest/admin/guides/certificates.html\"\necho 
\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n"
  },
  {
    "path": "tools/deployment/component/octavia/heat_octavia_env.yaml",
    "content": "---\nheat_template_version: 2021-04-16\n\nparameters:\n  public_network_name:\n    type: string\n    default: public\n\n  public_physical_network_name:\n    type: string\n    default: public\n\n  public_subnet_name:\n    type: string\n    default: public\n\n  public_subnet_cidr:\n    type: string\n    default: 172.24.4.0/24\n\n  public_subnet_gateway:\n    type: string\n    default: 172.24.4.1\n\n  public_allocation_pool_start:\n    type: string\n    default: 172.24.4.10\n\n  public_allocation_pool_end:\n    type: string\n    default: 172.24.4.254\n\n  private_subnet_cidr:\n    type: string\n    default: 192.168.128.0/24\n\n  dns_nameserver:\n    type: string\n    default: 172.24.4.1\n\n  image_name:\n    type: string\n    default: Ubuntu Jammy\n\n  image_url:\n    type: string\n    default: \"https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img\"\n\n  ssh_key:\n    type: string\n    default: octavia-key\n\n  compute_flavor_id:\n    type: string\n\n  az_1:\n    type: string\n\n  az_2:\n    type: string\n\nresources:\n  public_net:\n    type: OS::Neutron::ProviderNet\n    properties:\n      name:\n        get_param: public_network_name\n      router_external: true\n      physical_network:\n        get_param: public_physical_network_name\n      network_type: flat\n\n  public_subnet:\n    type: OS::Neutron::Subnet\n    properties:\n      name:\n        get_param: public_subnet_name\n      network:\n        get_resource: public_net\n      cidr:\n        get_param: public_subnet_cidr\n      gateway_ip:\n        get_param: public_subnet_gateway\n      enable_dhcp: false\n      dns_nameservers:\n        - get_param: public_subnet_gateway\n      allocation_pools:\n        - start: {get_param: public_allocation_pool_start}\n          end: {get_param: public_allocation_pool_end}\n\n  private_net:\n    type: OS::Neutron::Net\n\n  private_subnet:\n    type: OS::Neutron::Subnet\n    properties:\n      network:\n        get_resource: 
private_net\n      cidr:\n        get_param: private_subnet_cidr\n      dns_nameservers:\n        - get_param: dns_nameserver\n\n  image:\n    type: OS::Glance::WebImage\n    properties:\n      name:\n        get_param: image_name\n      location:\n        get_param: image_url\n      container_format: bare\n      disk_format: qcow2\n      min_disk: 3\n      visibility: public\n\n  flavor_vm:\n    type: OS::Nova::Flavor\n    properties:\n      name: m1.test\n      disk: 3\n      ram: 1024\n      vcpus: 2\n\n  wait_handle_1:\n    type: OS::Heat::WaitConditionHandle\n\n  wait_handle_2:\n    type: OS::Heat::WaitConditionHandle\n\n  server_1:\n    type: OS::Nova::Server\n    properties:\n      image:\n        get_resource: image\n      flavor:\n        get_resource: flavor_vm\n      key_name:\n        get_param: ssh_key\n      networks:\n        - port:\n            get_resource: server_port_1\n      user_data_format: RAW\n      user_data:\n        str_replace:\n          template: |\n            #!/bin/bash\n            echo \"nameserver $nameserver\" > /etc/resolv.conf\n            echo \"127.0.0.1 $(hostname)\" >> /etc/hosts\n            systemctl stop systemd-resolved\n            systemctl disable systemd-resolved\n            mkdir -p /var/www/html/\n            echo \"Hello from server_1: $(hostname)\" > /var/www/html/index.html\n            nohup python3 -m http.server 8000 --directory /var/www/html > /dev/null 2>&1 &\n            $wc_notify --data-binary '{ \"status\": \"SUCCESS\" }'\n          params:\n            $nameserver: {get_param: dns_nameserver}\n            $wc_notify: {get_attr: ['wait_handle_1', 'curl_cli']}\n      availability_zone: {get_param: az_1}\n\n  wait_server_1:\n    type: OS::Heat::WaitCondition\n    properties:\n      handle: {get_resource: wait_handle_1}\n      timeout: 1200\n\n  server_2:\n    type: OS::Nova::Server\n    properties:\n      image:\n        get_resource: image\n      flavor:\n        get_resource: flavor_vm\n      
key_name:\n        get_param: ssh_key\n      networks:\n        - port:\n            get_resource: server_port_2\n      user_data_format: RAW\n      user_data:\n        str_replace:\n          template: |\n            #!/bin/bash\n            echo \"nameserver $nameserver\" > /etc/resolv.conf\n            echo \"127.0.0.1 $(hostname)\" >> /etc/hosts\n            systemctl stop systemd-resolved\n            systemctl disable systemd-resolved\n            mkdir -p /var/www/html/\n            echo \"Hello from server_2: $(hostname)\" > /var/www/html/index.html\n            nohup python3 -m http.server 8000 --directory /var/www/html > /dev/null 2>&1 &\n            $wc_notify --data-binary '{ \"status\": \"SUCCESS\" }'\n          params:\n            $nameserver: {get_param: dns_nameserver}\n            $wc_notify: {get_attr: ['wait_handle_2', 'curl_cli']}\n      availability_zone: {get_param: az_2}\n\n  wait_server_2:\n    type: OS::Heat::WaitCondition\n    properties:\n      handle: {get_resource: wait_handle_2}\n      timeout: 1200\n\n  security_group:\n    type: OS::Neutron::SecurityGroup\n    properties:\n      name: default_port_security_group\n      rules:\n        - remote_ip_prefix: 0.0.0.0/0\n          protocol: tcp\n          port_range_min: 22\n          port_range_max: 22\n        - remote_ip_prefix: 0.0.0.0/0\n          protocol: tcp\n          port_range_min: 8000\n          port_range_max: 8000\n        - remote_ip_prefix: 0.0.0.0/0\n          protocol: icmp\n\n  server_port_1:\n    type: OS::Neutron::Port\n    properties:\n      network:\n        get_resource: private_net\n      fixed_ips:\n        - subnet:\n            get_resource: private_subnet\n      security_groups:\n        - get_resource: security_group\n\n  server_floating_ip_1:\n    type: OS::Neutron::FloatingIP\n    properties:\n      floating_network:\n        get_resource: public_net\n      port_id:\n        get_resource: server_port_1\n\n  server_port_2:\n    type: OS::Neutron::Port\n    
properties:\n      network:\n        get_resource: private_net\n      fixed_ips:\n        - subnet:\n            get_resource: private_subnet\n      security_groups:\n        - get_resource: security_group\n\n  server_floating_ip_2:\n    type: OS::Neutron::FloatingIP\n    properties:\n      floating_network:\n        get_resource: public_net\n      port_id:\n        get_resource: server_port_2\n\n  router:\n    type: OS::Neutron::Router\n    properties:\n      external_gateway_info:\n        network:\n          get_resource: public_net\n\n  router_interface:\n    type: OS::Neutron::RouterInterface\n    properties:\n      router_id:\n        get_resource: router\n      subnet_id:\n        get_resource: private_subnet\n\n  flavor_profile:\n    type: \"OS::Octavia::FlavorProfile\"\n    properties:\n      provider_name: amphora\n      flavor_data:\n        str_replace:\n          template: |\n            {\n              \"loadbalancer_topology\": \"SINGLE\",\n              \"compute_flavor\": \"%compute_flavor%\"\n            }\n          params:\n            \"%compute_flavor%\": {get_param: compute_flavor_id}\n\n  flavor:\n    type: \"OS::Octavia::Flavor\"\n    properties:\n      flavor_profile:\n        get_resource: flavor_profile\n\n  loadbalancer:\n    type: \"OS::Octavia::LoadBalancer\"\n    properties:\n      name: osh\n      provider: amphora\n      vip_subnet:\n        get_resource: private_subnet\n      flavor:\n        get_resource: flavor\n\n  floating_ip:\n    type: OS::Neutron::FloatingIP\n    properties:\n      floating_network: {get_resource: public_net}\n      port_id: {get_attr: [loadbalancer, vip_port_id]}\n\n  listener:\n    type: \"OS::Octavia::Listener\"\n    properties:\n      protocol_port: 80\n      protocol: \"HTTP\"\n      loadbalancer:\n        get_resource: loadbalancer\n\n  pool:\n    type: \"OS::Octavia::Pool\"\n    properties:\n      lb_algorithm: \"ROUND_ROBIN\"\n      listener:\n        get_resource: listener\n      protocol: 
\"HTTP\"\n\n  monitor:\n    type: \"OS::Octavia::HealthMonitor\"\n    properties:\n      delay: 3\n      max_retries: 9\n      timeout: 3\n      type: \"PING\"\n      pool:\n        get_resource: pool\n\n  pool_member_1:\n    type: \"OS::Octavia::PoolMember\"\n    properties:\n      subnet:\n        get_resource: private_subnet\n      protocol_port: 8000\n      pool:\n        get_resource: pool\n      address:\n        get_attr:\n          - \"server_1\"\n          - \"first_address\"\n\n  pool_member_2:\n    type: \"OS::Octavia::PoolMember\"\n    properties:\n      subnet:\n        get_resource: private_subnet\n      protocol_port: 8000\n      pool:\n        get_resource: pool\n      address:\n        get_attr:\n          - \"server_2\"\n          - \"first_address\"\n...\n"
  },
  {
    "path": "tools/deployment/component/octavia/octavia.sh",
    "content": "#!/bin/bash\n\n# Copyright 2019 Samsung Electronics Co., Ltd.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_OCTAVIA:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c octavia ${FEATURES})\"}\n\nexport OS_CLOUD=openstack_helm\n\nOSH_AMPHORA_IMAGE_NAME=\"amphora-x64-haproxy-ubuntu-jammy\"\nOSH_AMPHORA_IMAGE_OWNER_ID=$(openstack image show \"${OSH_AMPHORA_IMAGE_NAME}\" -f value -c owner)\nOSH_AMPHORA_SECGROUP_LIST=$(openstack security group list -f value | grep lb-mgmt-sec-grp | awk '{print $1}')\nOSH_AMPHORA_FLAVOR_ID=$(openstack flavor show m1.amphora -f value -c id)\nOSH_AMPHORA_BOOT_NETWORK_LIST=$(openstack network list --name lb-mgmt-net -f value -c ID)\n# Test nodes are quite small (usually 8Gb RAM) and for testing Octavia\n# we need two worker VM instances and one amphora VM instance.\n# We are going to run them all on different K8s nodes.\n# The /tmp/inventory_k8s_nodes.txt file is created by the deploy-env role and contains the list\n# of all K8s nodes. 
Amphora instance is run on the first K8s node from the list.\nOSH_AMPHORA_TARGET_HOSTNAME=$(sed -n '1p' /tmp/inventory_k8s_nodes.txt)\nCONTROLLER_IP_PORT_LIST=$(cat /tmp/octavia_hm_controller_ip_port_list)\n\n#NOTE: Deploy command\ntee /tmp/octavia.yaml <<EOF\npod:\n  mounts:\n    octavia_api:\n      octavia_api:\n        volumeMounts:\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/server_ca.cert.pem\n            subPath: server_ca.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/server_ca-chain.cert.pem\n            subPath: server_ca-chain.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/private/server_ca.key.pem\n            subPath: server_ca.key.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/client_ca.cert.pem\n            subPath: client_ca.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/private/client.cert-and-key.pem\n            subPath: client.cert-and-key.pem\n        volumes:\n          - name: octavia-certs\n            secret:\n              secretName: octavia-certs\n              defaultMode: 0644\n    octavia_worker:\n      octavia_worker:\n        volumeMounts:\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/server_ca.cert.pem\n            subPath: server_ca.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/server_ca-chain.cert.pem\n            subPath: server_ca-chain.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/private/server_ca.key.pem\n            subPath: server_ca.key.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/client_ca.cert.pem\n            subPath: client_ca.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/private/client.cert-and-key.pem\n            subPath: client.cert-and-key.pem\n        
volumes:\n          - name: octavia-certs\n            secret:\n              secretName: octavia-certs\n              defaultMode: 0644\n    octavia_housekeeping:\n      octavia_housekeeping:\n        volumeMounts:\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/server_ca.cert.pem\n            subPath: server_ca.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/server_ca-chain.cert.pem\n            subPath: server_ca-chain.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/private/server_ca.key.pem\n            subPath: server_ca.key.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/client_ca.cert.pem\n            subPath: client_ca.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/private/client.cert-and-key.pem\n            subPath: client.cert-and-key.pem\n        volumes:\n          - name: octavia-certs\n            secret:\n              secretName: octavia-certs\n              defaultMode: 0644\n    octavia_health_manager:\n      octavia_health_manager:\n        volumeMounts:\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/server_ca.cert.pem\n            subPath: server_ca.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/server_ca-chain.cert.pem\n            subPath: server_ca-chain.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/private/server_ca.key.pem\n            subPath: server_ca.key.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/client_ca.cert.pem\n            subPath: client_ca.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/private/client.cert-and-key.pem\n            subPath: client.cert-and-key.pem\n        volumes:\n          - name: octavia-certs\n            secret:\n              secretName: octavia-certs\n         
     defaultMode: 0644\n    octavia_driver_agent:\n      octavia_driver_agent:\n        volumeMounts:\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/server_ca.cert.pem\n            subPath: server_ca.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/server_ca-chain.cert.pem\n            subPath: server_ca-chain.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/private/server_ca.key.pem\n            subPath: server_ca.key.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/client_ca.cert.pem\n            subPath: client_ca.cert.pem\n          - name: octavia-certs\n            mountPath: /etc/octavia/certs/private/client.cert-and-key.pem\n            subPath: client.cert-and-key.pem\n        volumes:\n          - name: octavia-certs\n            secret:\n              secretName: octavia-certs\n              defaultMode: 0644\nconf:\n  octavia:\n    controller_worker:\n      amp_image_owner_id: ${OSH_AMPHORA_IMAGE_OWNER_ID}\n      amp_secgroup_list: ${OSH_AMPHORA_SECGROUP_LIST}\n      amp_flavor_id: ${OSH_AMPHORA_FLAVOR_ID}\n      amp_boot_network_list: ${OSH_AMPHORA_BOOT_NETWORK_LIST}\n      amp_image_tag: amphora\n      amp_ssh_key_name: octavia-key\n    health_manager:\n      bind_port: 5555\n      bind_ip: 0.0.0.0\n      controller_ip_port_list: ${CONTROLLER_IP_PORT_LIST}\n    task_flow:\n      jobboard_enabled: false\n    nova:\n      availability_zone: nova:${OSH_AMPHORA_TARGET_HOSTNAME}\nEOF\nhelm upgrade --install octavia ${OSH_HELM_REPO}/octavia \\\n  --namespace=openstack \\\n  --values=/tmp/octavia.yaml \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_OCTAVIA}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\n#NOTE: Validate Deployment info\nopenstack service list\n"
  },
  {
    "path": "tools/deployment/component/octavia/octavia_certs.sh",
    "content": "#!/bin/bash\n\n# Copyright 2019 Samsung Electronics Co., Ltd.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\nfunction trim_data() {\n  local data_path=$1\n  cat $data_path | base64 -w0 | tr -d '\\n'\n}\n\nfunction create_secret() {\n  {\n  cat <<EOF\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: octavia-certs\ntype: Opaque\ndata:\n  server_ca.cert.pem: $(trim_data dual_ca/etc/octavia/certs/server_ca.cert.pem)\n  server_ca-chain.cert.pem: $(trim_data dual_ca/etc/octavia/certs/server_ca-chain.cert.pem)\n  server_ca.key.pem: $(trim_data dual_ca/etc/octavia/certs/server_ca.key.pem)\n  client_ca.cert.pem: $(trim_data dual_ca/etc/octavia/certs/client_ca.cert.pem)\n  client.cert-and-key.pem: $(trim_data dual_ca/etc/octavia/certs/client.cert-and-key.pem)\nEOF\n  }| kubectl apply --namespace openstack -f -\n}\n\n(\n    cd \"$(dirname \"$0\")\";\n    ./create_dual_intermediate_CA.sh\n    create_secret\n)\n"
  },
  {
    "path": "tools/deployment/component/octavia/octavia_resources.sh",
    "content": "#!/bin/bash\n\n# Copyright 2019 Samsung Electronics Co., Ltd.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\nexport OS_CLOUD=openstack_helm\n\nSSH_DIR=\"${HOME}/.ssh\"\nOPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS=\"${OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS} -v ${SSH_DIR}:${SSH_DIR} -v /tmp:/tmp\"\nexport OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS\n\n: ${OSH_LB_SUBNET:=\"172.31.0.0/24\"}\n: ${OSH_LB_SUBNET_START:=\"172.31.0.2\"}\n: ${OSH_LB_SUBNET_END=\"172.31.0.200\"}\n: ${OSH_AMPHORA_IMAGE_NAME:=\"amphora-x64-haproxy-ubuntu-jammy\"}\n: ${OSH_AMPHORA_IMAGE_FILE:=\"test-only-amphora-x64-haproxy-ubuntu-jammy.qcow2\"}\n: ${OSH_AMPHORA_IMAGE_URL:=\"https://tarballs.opendev.org/openstack/octavia/test-images/test-only-amphora-x64-haproxy-ubuntu-jammy.qcow2\"}\n\n# # This is for debugging, to be able to connect via ssh to the amphora instance from the cluster node\n# # and make the amphora able to connect to Internet.\n# # The /tmp/inventory_default_dev.txt file is created by the deploy-env role and contains\n# # the name of the default interface on a node.\n# sudo iptables -t nat -I POSTROUTING -o $(cat /tmp/inventory_default_dev.txt) -s ${OSH_LB_SUBNET} -j MASQUERADE\n# sudo iptables -t filter -I FORWARD -s ${OSH_LB_SUBNET} -j ACCEPT\n\n# Create Octavia management network and its security group\nopenstack network show lb-mgmt-net || \\\n    openstack network create lb-mgmt-net -f value -c id\nopenstack subnet show 
lb-mgmt-subnet || \\\n    openstack subnet create --subnet-range $OSH_LB_SUBNET --allocation-pool start=$OSH_LB_SUBNET_START,end=$OSH_LB_SUBNET_END --network lb-mgmt-net lb-mgmt-subnet -f value -c id\nopenstack security group show lb-mgmt-sec-grp || \\\n    { openstack security group create lb-mgmt-sec-grp; \\\n      openstack security group rule create --protocol icmp lb-mgmt-sec-grp; \\\n      openstack security group rule create --protocol tcp --dst-port 22 lb-mgmt-sec-grp; \\\n      openstack security group rule create --protocol tcp --dst-port 9443 lb-mgmt-sec-grp; }\n\n# Create security group for Octavia health manager\nopenstack security group show lb-health-mgr-sec-grp || \\\n    { openstack security group create lb-health-mgr-sec-grp; \\\n      openstack security group rule create --protocol udp --dst-port 5555 lb-health-mgr-sec-grp; }\n\n# Create security group for Octavia worker\nopenstack security group show lb-worker-sec-grp || \\\n    { openstack security group create lb-worker-sec-grp; }\n\n# Create ports for health manager (octavia-health-manager-port-{KUBE_NODE_NAME})\n# and the same for worker (octavia-worker-port-{KUBE_NODE_NAME})\n# octavia-health-manager and octavia-worker pods will be run on each network node as daemonsets.\n# The pods will create NICs on each network node attached to lb-mgmt-net.\nCONTROLLER_IP_PORT_LIST=''\nCTRLS=$(kubectl get nodes -l openstack-network-node=enabled -o name | awk -F\"/\" '{print $2}')\nfor node in $CTRLS\ndo\n  PORTNAME=octavia-health-manager-port-$node\n  openstack port show $PORTNAME || \\\n    openstack port create --security-group lb-health-mgr-sec-grp --device-owner Octavia:health-mgr --host=$node -c id -f value --network lb-mgmt-net $PORTNAME\n  IP=$(openstack port show $PORTNAME -f json | jq -r  '.fixed_ips[0].ip_address')\n  if [ -z $CONTROLLER_IP_PORT_LIST ]; then\n    CONTROLLER_IP_PORT_LIST=$IP:5555\n  else\n    CONTROLLER_IP_PORT_LIST=$CONTROLLER_IP_PORT_LIST,$IP:5555\n  fi\n  
WORKER_PORTNAME=octavia-worker-port-$node\n  openstack port show $WORKER_PORTNAME || \\\n    openstack port create --security-group lb-worker-sec-grp --device-owner Octavia:worker --host=$node -c id -f value --network lb-mgmt-net $WORKER_PORTNAME\n  openstack port show $WORKER_PORTNAME -f json | jq -r  '.fixed_ips[0].ip_address'\ndone\n\n# Each health manager information should be passed into octavia configuration.\necho $CONTROLLER_IP_PORT_LIST > /tmp/octavia_hm_controller_ip_port_list\n\n# Create a flavor for amphora instance\nopenstack flavor show m1.amphora || \\\n    openstack flavor create --ram 1024 --disk 3 --vcpus 1 m1.amphora\n\n# Create key pair to connect amphora instance via management network\nmkdir -p ${SSH_DIR}\nopenstack keypair show octavia-key || \\\n  openstack keypair create --private-key ${SSH_DIR}/octavia_key octavia-key\nsudo chown $(id -un) ${SSH_DIR}/octavia_key\nchmod 600 ${SSH_DIR}/octavia_key\n\n# accept diffie-hellman-group1-sha1 algo for SSH (for compatibility with older images)\nsudo tee -a /etc/ssh/ssh_config <<EOF\n    KexAlgorithms +diffie-hellman-group1-sha1\n    HostKeyAlgorithms +ssh-rsa\n    PubkeyAcceptedKeyTypes +ssh-rsa\nEOF\n\nif [ ! -f \"/tmp/${OSH_AMPHORA_IMAGE_FILE}\" ]; then\n  curl --fail -sSL ${OSH_AMPHORA_IMAGE_URL} -o /tmp/${OSH_AMPHORA_IMAGE_FILE}\nfi\n\nopenstack image show ${OSH_AMPHORA_IMAGE_NAME} || \\\n    openstack image create -f value -c id \\\n    --public \\\n    --container-format=bare \\\n    --disk-format qcow2 \\\n    --min-disk 2 \\\n    --file /tmp/${OSH_AMPHORA_IMAGE_FILE} \\\n    ${OSH_AMPHORA_IMAGE_NAME}\nOSH_AMPHORA_IMAGE_ID=$(openstack image show ${OSH_AMPHORA_IMAGE_NAME} -f value -c id)\nopenstack image set --tag amphora ${OSH_AMPHORA_IMAGE_ID}\n"
  },
  {
    "path": "tools/deployment/component/octavia/octavia_test.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\nexport OS_CLOUD=openstack_helm\n\nHEAT_DIR=\"$(readlink -f ./tools/deployment/component/octavia)\"\nSSH_DIR=\"${HOME}/.ssh\"\n\nOPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS=\"${OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS} -v ${HEAT_DIR}:${HEAT_DIR} -v ${SSH_DIR}:${SSH_DIR}\"\nexport OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS\n\nCOMPUTE_FLAVOR_ID=$(openstack flavor show -f value -c id m1.amphora)\n# The /tmp/inventory_k8s_nodes.txt file is created by the deploy-env role and contains the list\n# of all K8s nodes. 
Amphora instance is run on the first K8s node from the list.\n# Worker VM instances are run on the rest of the nodes.\nTARGET_HOST_1=$(sed -n '2p' /tmp/inventory_k8s_nodes.txt)\nTARGET_HOST_2=$(sed -n '3p' /tmp/inventory_k8s_nodes.txt)\n\nopenstack stack show \"octavia-env\" || \\\n  openstack stack create --wait \\\n    --parameter compute_flavor_id=${COMPUTE_FLAVOR_ID} \\\n    --parameter az_1=\"nova:${TARGET_HOST_1}\" \\\n    --parameter az_2=\"nova:${TARGET_HOST_2}\" \\\n    -t ${HEAT_DIR}/heat_octavia_env.yaml \\\n    octavia-env\n\nsleep 30\n\nLB_FLOATING_IP=$(openstack floating ip list --port $(openstack loadbalancer show osh -c vip_port_id -f value) -f value -c \"Floating IP Address\" | head -n1)\n\necho -n > /tmp/curl.txt\ncurl http://${LB_FLOATING_IP} >> /tmp/curl.txt\ncurl http://${LB_FLOATING_IP} >> /tmp/curl.txt\ngrep \"Hello from server_1\" /tmp/curl.txt\ngrep \"Hello from server_2\" /tmp/curl.txt\n"
  },
  {
    "path": "tools/deployment/component/octavia/openssl.cnf",
    "content": "# OpenSSL root CA configuration file.\n\n[ ca ]\n# `man ca`\ndefault_ca = CA_default\n\n[ CA_default ]\n# Directory and file locations.\ndir               = ./\ncerts             = $dir/certs\ncrl_dir           = $dir/crl\nnew_certs_dir     = $dir/newcerts\ndatabase          = $dir/index.txt\nserial            = $dir/serial\nRANDFILE          = $dir/private/.rand\n\n# The root key and root certificate.\nprivate_key       = $dir/private/ca.key.pem\ncertificate       = $dir/certs/ca.cert.pem\n\n# For certificate revocation lists.\ncrlnumber         = $dir/crlnumber\ncrl               = $dir/crl/ca.crl.pem\ncrl_extensions    = crl_ext\ndefault_crl_days  = 30\n\n# SHA-1 is deprecated, so use SHA-2 instead.\ndefault_md        = sha256\n\nname_opt          = ca_default\ncert_opt          = ca_default\n# 10 years\ndefault_days      = 7300\npreserve          = no\npolicy            = policy_strict\n\n[ CA_intermediate ]\n# Directory and file locations.\ndir               = ./intermediate_ca\ncerts             = $dir/certs\ncrl_dir           = $dir/crl\nnew_certs_dir     = $dir/newcerts\ndatabase          = $dir/index.txt\nserial            = $dir/serial\nRANDFILE          = $dir/private/.rand\n\n# The root key and root certificate.\nprivate_key       = ./private/ca.key.pem\ncertificate       = ./certs/ca.cert.pem\n\n# For certificate revocation lists.\ncrlnumber         = $dir/crlnumber\ncrl               = $dir/crl/ca.crl.pem\ncrl_extensions    = crl_ext\ndefault_crl_days  = 30\n\n# SHA-1 is deprecated, so use SHA-2 instead.\ndefault_md        = sha256\n\nname_opt          = ca_default\ncert_opt          = ca_default\n# 5 years\ndefault_days      = 3650\npreserve          = no\npolicy            = policy_strict\n\n[ policy_strict ]\n# The root CA should only sign intermediate certificates that match.\n# See the POLICY FORMAT section of `man ca`.\ncountryName             = match\nstateOrProvinceName     = match\norganizationName        = 
match\norganizationalUnitName  = optional\ncommonName              = supplied\nemailAddress            = optional\n\n[ req ]\n# Options for the `req` tool (`man req`).\ndefault_bits        = 2048\ndistinguished_name  = req_distinguished_name\nstring_mask         = utf8only\n\n# SHA-1 is deprecated, so use SHA-2 instead.\ndefault_md          = sha256\n\n# Extension to add when the -x509 option is used.\nx509_extensions     = v3_ca\n\n[ req_distinguished_name ]\n# See <https://en.wikipedia.org/wiki/Certificate_signing_request>.\ncountryName                     = Country Name (2 letter code)\nstateOrProvinceName             = State or Province Name\nlocalityName                    = Locality Name\n0.organizationName              = Organization Name\norganizationalUnitName          = Organizational Unit Name\ncommonName                      = Common Name\nemailAddress                    = Email Address\n\n# Optionally, specify some defaults.\ncountryName_default             = US\nstateOrProvinceName_default     = Oregon\nlocalityName_default            = Corvallis\n0.organizationName_default      = OpenStack\norganizationalUnitName_default  = Octavia\nemailAddress_default            =\ncommonName_default              = example.org\n\n[ v3_ca ]\n# Extensions for a typical CA (`man x509v3_config`).\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always,issuer\nbasicConstraints = critical, CA:true\nkeyUsage = critical, digitalSignature, cRLSign, keyCertSign\n\n[ v3_intermediate_ca ]\n# Extensions for a typical intermediate CA (`man x509v3_config`).\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always,issuer\nbasicConstraints = critical, CA:true, pathlen:0\nkeyUsage = critical, digitalSignature, cRLSign, keyCertSign\n\n[ usr_cert ]\n# Extensions for client certificates (`man x509v3_config`).\nbasicConstraints = CA:FALSE\nnsCertType = client, email\nnsComment = \"OpenSSL Generated Client Certificate\"\nsubjectKeyIdentifier = 
hash\nauthorityKeyIdentifier = keyid,issuer\nkeyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, emailProtection\n\n[ server_cert ]\n# Extensions for server certificates (`man x509v3_config`).\nbasicConstraints = CA:FALSE\nnsCertType = server\nnsComment = \"OpenSSL Generated Server Certificate\"\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid,issuer:always\nkeyUsage = critical, digitalSignature, keyEncipherment\nextendedKeyUsage = serverAuth\n\n[ crl_ext ]\n# Extension for CRLs (`man x509v3_config`).\nauthorityKeyIdentifier=keyid:always\n"
  },
  {
    "path": "tools/deployment/component/ovn/ovn.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_OVN:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c ovn ${FEATURES})\"}\n\ntee /tmp/ovn.yaml << EOF\nvolume:\n  ovn_ovsdb_nb:\n    enabled: false\n  ovn_ovsdb_sb:\n    enabled: false\nnetwork:\n  interface:\n    tunnel: null\nconf:\n  ovn_bridge_mappings: public:br-ex\n  auto_bridge_add:\n    br-ex: provider1\nEOF\n\n#NOTE: Deploy command\n: ${OSH_EXTRA_HELM_ARGS:=\"\"}\nhelm upgrade --install ovn ${OSH_HELM_REPO}/ovn \\\n  --namespace=openstack \\\n  --values=/tmp/ovn.yaml \\\n  ${OSH_EXTRA_HELM_ARGS} \\\n  ${OSH_EXTRA_HELM_ARGS_OVN}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n"
  },
  {
    "path": "tools/deployment/component/redis/redis.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n# NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_ZAQAR:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c redis ${FEATURES})\"}\n\nhelm upgrade --install redis ${OSH_HELM_REPO}/redis \\\n    --namespace openstack \\\n    --create-namespace \\\n    --timeout 600s \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_ZAQAR}\n\n# NOTE: Wait for pods to be ready\nhelm osh wait-for-pods openstack\n"
  },
  {
    "path": "tools/deployment/component/skyline/skyline.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_SKYLINE:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c skyline ${FEATURES})\"}\n\n#NOTE: Deploy command\nhelm upgrade --install skyline ${OSH_HELM_REPO}/skyline \\\n  --namespace=openstack \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_SKYLINE}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack 1200\n"
  },
  {
    "path": "tools/deployment/component/swift/swift.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_SWIFT:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c swift ${FEATURES})\"}\n\ntee /tmp/swift.yaml << EOF\nring:\n  replicas: 1\n  devices:\n    - name: loop100\n      weight: 100\nEOF\n\n#NOTE: Deploy command\nhelm upgrade --install swift ${OSH_HELM_REPO}/swift \\\n  --namespace=openstack \\\n  --values=/tmp/swift.yaml \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_SWIFT}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack 1200\n\nopenstack service list\nopenstack endpoint list\n\n# Testing Swift\nopenstack container list\nopenstack container create test-container\nopenstack container list\n\necho \"Hello World\" > hello-world.txt\nexport OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS=\"-v $(pwd):/mnt\"\nopenstack object create test-container /mnt/hello-world.txt\nopenstack object list test-container\nopenstack object delete test-container /mnt/hello-world.txt\nopenstack container delete test-container\nopenstack container list\n"
  },
  {
    "path": "tools/deployment/component/tacker/tacker.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_TACKER:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c tacker ${FEATURES})\"}\n: ${RUN_HELM_TESTS:=\"no\"}\n: ${STORAGE_CLASS:=\"nfs-provisioner\"}\n\n#NOTE: Deploy command\nhelm upgrade --install tacker ${OSH_HELM_REPO}/tacker \\\n    --namespace=openstack \\\n    --set storage.storageClass=${STORAGE_CLASS} \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_TACKER}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n"
  },
  {
    "path": "tools/deployment/component/trove/trove.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_TROVE:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c trove ${FEATURES})\"}\n: ${RUN_HELM_TESTS:=\"yes\"}\n\n#NOTE: Deploy command\ntee /tmp/trove.yaml <<EOF\nconf:\n  trove:\n    DEFAULT:\n      default_datastore: mysql\nEOF\nhelm upgrade --install trove ${OSH_HELM_REPO}/trove \\\n  --namespace=openstack \\\n  --values=/tmp/trove.yaml \\\n  --timeout=600s \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_TROVE}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\n#NOTE: Validate Deployment info\nexport OS_CLOUD=openstack_helm\nopenstack service list\nsleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx\nopenstack database instance list\n\n# Delete the test pod if it still exists\nkubectl delete pods -l application=trove,release_group=trove,component=test --namespace=openstack --ignore-not-found\n\nif [ \"x${RUN_HELM_TESTS}\" != \"xno\" ]; then\n    ./tools/deployment/common/run-helm-tests.sh trove\nfi\n"
  },
  {
    "path": "tools/deployment/component/watcher/watcher.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_MANILA:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-}  -p ${OSH_VALUES_OVERRIDES_PATH} -c watcher ${FEATURES})\"}\n: ${RUN_HELM_TESTS:=\"no\"}\n\n#NOTE: Deploy command\nhelm upgrade --install watcher ${OSH_HELM_REPO}/watcher \\\n    --namespace=openstack \\\n    ${OSH_EXTRA_HELM_ARGS} \\\n    ${OSH_EXTRA_HELM_ARGS_WATCHER}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack 1800\n\n#NOTE: Validate Deployment\nexport OS_CLOUD=openstack_helm\nopenstack service list"
  },
  {
    "path": "tools/deployment/component/zaqar/zaqar.sh",
    "content": "#!/bin/bash\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nset -xe\n\n# NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_ZAQAR:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c zaqar ${FEATURES})\"}\n\n# NOTE: Deploy Zaqar\necho \"Deploying OpenStack Zaqar\"\n\nhelm upgrade --install zaqar ${OSH_HELM_REPO}/zaqar \\\n    --namespace openstack \\\n    --create-namespace \\\n    --timeout 600s \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_ZAQAR}\n\n# NOTE: Wait for pods to be ready\nhelm osh wait-for-pods openstack\n\necho \"OpenStack Zaqar deployment complete.\"\n"
  },
  {
    "path": "tools/deployment/component/zaqar/zaqar_smoke_test.sh",
    "content": "#!/bin/bash\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -ex\n\nexport OS_CLOUD=openstack_helm\n\nOS_PROJECT_ID=$(openstack project show admin -c id -f value)\nQUEUE_NAME=\"test\"\nCLIENT_ID=$(uuidgen)\n\nopenstack --os-project-id $OS_PROJECT_ID messaging queue list\n\nopenstack --os-project-id $OS_PROJECT_ID messaging queue create $QUEUE_NAME\n\nopenstack --os-project-id $OS_PROJECT_ID messaging message post $QUEUE_NAME '{\"body\": \"hello world 1\"}' --client-id $CLIENT_ID\nopenstack --os-project-id $OS_PROJECT_ID messaging message post $QUEUE_NAME '{\"body\": \"hello world 2\"}' --client-id $CLIENT_ID\n\nopenstack --os-project-id $OS_PROJECT_ID messaging message list $QUEUE_NAME --client-id $CLIENT_ID --echo\n"
  },
  {
    "path": "tools/deployment/db/mariadb-backup.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_MARIADB_BACKUP:=\"$(helm osh get-values-overrides -c mariadb-backup ${FEATURES})\"}\n\n#NOTE: Deploy command\nhelm upgrade --install mariadb-backup ${OSH_HELM_REPO}/mariadb-backup \\\n    --namespace=openstack \\\n    --wait \\\n    --timeout 900s \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_MARIADB_BACKUP}\n\nhelm osh wait-for-pods openstack\n\nkubectl create job --from=cronjob/mariadb-backup mariadb-backup-manual-001 -n openstack\n\nhelm osh wait-for-pods openstack\n\nkubectl logs jobs/mariadb-backup-manual-001 -n openstack\n"
  },
  {
    "path": "tools/deployment/db/mariadb-operator-cluster.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n# Specify the Rook release tag to use for the Rook operator here\n: ${MARIADB_OPERATOR_RELEASE:=\"0.22.0\"}\n\n# install mariadb-operator\nhelm repo add mariadb-operator https://mariadb-operator.github.io/mariadb-operator\nhelm upgrade --install --create-namespace mariadb-operator mariadb-operator/mariadb-operator --version ${MARIADB_OPERATOR_RELEASE} -n mariadb-operator\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods mariadb-operator\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_MARIADB_CLUSTER:=\"$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c mariadb-cluster ${FEATURES})\"}\n\n#NOTE: Deploy command\n# Deploying downscaled cluster\n: ${OSH_EXTRA_HELM_ARGS:=\"\"}\nhelm upgrade --install mariadb-cluster ${OSH_HELM_REPO}/mariadb-cluster \\\n    --namespace=openstack \\\n    --wait \\\n    --timeout 900s \\\n    --values values_overrides/mariadb-cluster/downscaled.yaml \\\n    ${OSH_EXTRA_HELM_ARGS} \\\n    ${OSH_EXTRA_HELM_ARGS_MARIADB_CLUSTER}\n\nsleep 30\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\nkubectl get pods --namespace=openstack -o wide\n\n#NOTE: Deploy command\n# Upscaling the cluster to 3 instances\n# mariadb-operator is not handinling changes in appropriate statefulset\n# so a special job has to delete 
the statefulset in order\n# to let mariadb-operator to re-create the sts with new params\nhelm upgrade --install mariadb-cluster ./mariadb-cluster \\\n    --namespace=openstack \\\n    --wait \\\n    --timeout 900s \\\n    --values values_overrides/mariadb-cluster/upscaled.yaml \\\n    ${OSH_EXTRA_HELM_ARGS} \\\n    ${OSH_EXTRA_HELM_ARGS_MARIADB_CLUSTER}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\nkubectl get pods --namespace=openstack -o wide\n\n# Delete the test pod if it still exists\nkubectl delete pods -l application=mariadb,release_group=mariadb-cluster,component=test --namespace=openstack --ignore-not-found\n#NOTE: Validate the deployment\nhelm test mariadb-cluster --namespace openstack\n"
  },
  {
    "path": "tools/deployment/db/mariadb.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Define variables\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_MARIADB:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c mariadb ${FEATURES})\"}\n: ${NAMESPACE:=\"openstack\"}\n: ${RUN_HELM_TESTS:=\"no\"}\n\n#NOTE: Deploy command\nhelm upgrade --install mariadb ${OSH_HELM_REPO}/mariadb \\\n    --namespace=${NAMESPACE} \\\n    ${MONITORING_HELM_ARGS:=\"--set monitoring.prometheus.enabled=true\"} \\\n    --set pod.replicas.server=1 \\\n    ${VOLUME_HELM_ARGS:=\"--set volume.enabled=false --set volume.use_local_path_for_single_pod_cluster.enabled=true\"} \\\n    --timeout=600s \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_MARIADB}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods ${NAMESPACE}\n\nif [ \"x${RUN_HELM_TESTS}\" != \"xno\" ]; then\n    # Delete the test pod if it still exists\n    kubectl delete pods -l application=mariadb,release_group=mariadb,component=test --namespace=${NAMESPACE} --ignore-not-found\n    #NOTE: Validate the deployment\n    helm test mariadb --namespace ${NAMESPACE}\nfi\n"
  },
  {
    "path": "tools/deployment/db/postgresql.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Deploy command\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS:=\"\"}\n: ${OSH_EXTRA_HELM_ARGS_POSTGRESQL:=\"$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c postgresql ${FEATURES})\"}\n: ${NAMESPACE:=openstack}\n\nhelm upgrade --install postgresql ${OSH_HELM_REPO}/postgresql \\\n    --namespace=${NAMESPACE} \\\n    ${MONITORING_HELM_ARGS:=\"--set monitoring.prometheus.enabled=true\"} \\\n    --set pod.replicas.server=1 \\\n    ${VOLUME_HELM_ARGS:=\"--set storage.pvc.enabled=false --set storage.host.host_path=/tmp/postgresql-data --set conf.postgresql.archive_mode=off\"} \\\n    ${OSH_EXTRA_HELM_ARGS} \\\n    ${OSH_EXTRA_HELM_ARGS_POSTGRESQL}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods ${NAMESPACE}\n"
  },
  {
    "path": "tools/deployment/logging/elasticsearch.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n#NOTE: Deploy command\ntee /tmp/elasticsearch.yaml << EOF\njobs:\n  verify_repositories:\n    cron: \"*/3 * * * *\"\nmonitoring:\n  prometheus:\n    enabled: true\npod:\n  replicas:\n    client: 1\n    data: 1\n    master: 2\nconf:\n  elasticsearch:\n    snapshots:\n      enabled: true\n  api_objects:\n    snapshot_repo:\n      endpoint: _snapshot/ceph-rgw\n      body:\n        type: s3\n        settings:\n          client: default\n          bucket: elasticsearch-bucket\n    slm_policy:\n      endpoint: _slm/policy/snapshots\n      body:\n        schedule: \"0 */15 * * * ?\"\n        name: \"<snapshot-{now/d}>\"\n        repository: ceph-rgw\n        config:\n          indices:\n            - \"<*-{now/d}>\"\n        retention:\n          expire_after: 30d\n    ilm_policy:\n      endpoint: _ilm/policy/cleanup\n      body:\n        policy:\n          phases:\n            delete:\n              min_age: 5d\n              actions:\n                delete: {}\n    test_empty: {}\nstorage:\n  s3:\n    clients:\n      # These values configure the s3 clients section of elasticsearch.yml, with access_key and secret_key being saved to the keystore\n      default:\n        # not needed when using Rook Ceph CRDs\n        # auth:\n        #   username: elasticsearch\n        #   access_key: \"elastic_access_key\"\n        #   secret_key: 
\"elastic_secret_key\"\n        settings:\n          # endpoint: Defaults to the ceph-rgw endpoint\n          # protocol: Defaults to http\n          path_style_access: true # Required for ceph-rgw S3 API\n        create_user: true # Attempt to create the user at the ceph_object_store endpoint, authenticating using the secret named at .Values.secrets.rgw.admin\n      backup: # Change this as you'd like\n        # not needed when using Rook Ceph CRDs\n        # auth:\n        #   username: backup\n        #   access_key: \"backup_access_key\"\n        #   secret_key: \"backup_secret_key\"\n        settings:\n          # endpoint: rook-ceph-rgw-default.ceph.svc.cluster.local # Using the ingress here to test the endpoint override\n          path_style_access: true\n        create_user: true\n    buckets: # List of buckets to create (if required).\n      - name: elasticsearch-bucket\n        client: default\n        storage_class: ceph-bucket # this is valid when using Rook CRDs\n        # not needed when using Rook Ceph CRDs\n        # options: # list of extra options for s3cmd\n        #   - --region=\"default:osh-infra\"\n      - name: backup-bucket\n        client: backup\n        storage_class: ceph-bucket # this is valid when using Rook CRDs\n        # not needed when using Rook Ceph CRDs\n        # options: # list of extra options for s3cmd\n        #   - --region=\"default:backup\"\nendpoints:\n  ceph_object_store:\n    name: radosgw\n    namespace: ceph\n    hosts:\n      default: rook-ceph-rgw-default\n      public: radosgw\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      api:\n        default: 8080\n        public: 80\nnetwork:\n  elasticsearch:\n    ingress:\n      classes:\n        namespace: ingress-osh-infra\ndependencies:\n  static:\n    elasticsearch_templates:\n      services:\n        - endpoint: internal\n          service: elasticsearch\n      jobs: null\n      
custom_resources:\n        - apiVersion: objectbucket.io/v1alpha1\n          kind: ObjectBucket\n          name: obc-osh-infra-elasticsearch-bucket\n          fields:\n            - key: \"status.phase\"\n              value: \"Bound\"\n        - apiVersion: objectbucket.io/v1alpha1\n          kind: ObjectBucket\n          name: obc-osh-infra-backup-bucket\n          fields:\n            - key: \"status.phase\"\n              value: \"Bound\"\n    snapshot_repository:\n      services:\n        - endpoint: internal\n          service: elasticsearch\n      jobs: null\n      custom_resources:\n        - apiVersion: objectbucket.io/v1alpha1\n          kind: ObjectBucket\n          name: obc-osh-infra-elasticsearch-bucket\n          fields:\n            - key: \"status.phase\"\n              value: \"Bound\"\n        - apiVersion: objectbucket.io/v1alpha1\n          kind: ObjectBucket\n          name: obc-osh-infra-backup-bucket\n          fields:\n            - key: \"status.phase\"\n              value: \"Bound\"\nmanifests:\n  job_s3_user: false\n  job_s3_bucket: false\n  object_bucket_claim: true\n\nimages:\n  tags:\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n\nEOF\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_ELASTICSEARCH:=\"$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c elasticsearch ${FEATURES})\"}\n\nhelm upgrade --install elasticsearch ${OSH_HELM_REPO}/elasticsearch \\\n  --namespace=osh-infra \\\n  --values=/tmp/elasticsearch.yaml\\\n  ${OSH_EXTRA_HELM_ARGS} \\\n  ${OSH_EXTRA_HELM_ARGS_ELASTICSEARCH}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods osh-infra\n\n# Delete the test pod if it still exists\nkubectl delete pods -l application=elasticsearch,release_group=elasticsearch,component=test --namespace=osh-infra --ignore-not-found\nhelm test elasticsearch --namespace osh-infra\n"
  },
  {
    "path": "tools/deployment/logging/fluentbit.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_FLUENTBIT:=\"$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c fluentbit ${FEATURES})\"}\n\nhelm upgrade --install fluentbit ${OSH_HELM_REPO}/fluentbit \\\n  --namespace=osh-infra \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_FLUENTBIT}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods osh-infra\n"
  },
  {
    "path": "tools/deployment/logging/fluentd.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_FLUENTD:=\"$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c fluentd ${FEATURES})\"}\n\ntee /tmp/fluentd.yaml << EOF\npod:\n  env:\n    fluentd:\n      vars:\n        MY_TEST_VAR: FOO\n      secrets:\n        MY_TEST_SECRET: BAR\nconf:\n  fluentd:\n    conf:\n      # These fields are rendered as helm templates\n      input: |\n        <source>\n          @type prometheus\n          port {{ tuple \"fluentd\" \"internal\" \"metrics\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n        </source>\n\n        <source>\n          @type prometheus_monitor\n        </source>\n\n        <source>\n          @type prometheus_output_monitor\n        </source>\n\n        <source>\n          @type prometheus_tail_monitor\n        </source>\n\n        <source>\n          bind 0.0.0.0\n          port \"#{ENV['FLUENTD_PORT']}\"\n          @type forward\n        </source>\n\n        <source>\n          @type tail\n          @id in_tail_container_logs\n          path \"/var/log/containers/*.log\"\n          pos_file \"/var/log/fluentd-containers.log.pos\"\n          tag kubernetes.*\n          read_from_head true\n          emit_unmatched_lines true\n          <parse>\n            @type \"multi_format\"\n            <pattern>\n              format json\n              time_key \"time\"\n              time_type string\n              time_format \"%Y-%m-%dT%H:%M:%S.%NZ\"\n              keep_time_key false\n            </pattern>\n            <pattern>\n              format regexp\n              expression /^(?<time>.+) (?<stream>stdout|stderr)( (.))? 
(?<log>.*)$/\n              time_format \"%Y-%m-%dT%H:%M:%S.%NZ\"\n              keep_time_key false\n            </pattern>\n          </parse>\n        </source>\n\n        <source>\n          @type tail\n          tag libvirt.*\n          path /var/log/libvirt/**.log\n          pos_file \"/var/log/fluentd-libvirt.log.pos\"\n          read_from_head true\n          <parse>\n            @type none\n          </parse>\n        </source>\n\n        <source>\n          @type systemd\n          tag auth\n          path /var/log/journal\n          matches [{ \"SYSLOG_FACILITY\":\"10\" }]\n          read_from_head true\n\n          <storage>\n            @type local\n            path /var/log/fluentd-systemd-auth.json\n          </storage>\n\n          <entry>\n            fields_strip_underscores true\n            fields_lowercase true\n          </entry>\n        </source>\n\n        <source>\n          @type systemd\n          tag journal.*\n          path /var/log/journal\n          matches [{ \"_SYSTEMD_UNIT\": \"docker.service\" }]\n          read_from_head true\n\n          <storage>\n            @type local\n            path /var/log/fluentd-systemd-docker.json\n          </storage>\n\n          <entry>\n            fields_strip_underscores true\n            fields_lowercase true\n          </entry>\n        </source>\n\n        <source>\n          @type systemd\n          tag journal.*\n          path /var/log/journal\n          matches [{ \"_SYSTEMD_UNIT\": \"kubelet.service\" }]\n          read_from_head true\n\n          <storage>\n            @type local\n            path /var/log/fluentd-systemd-kubelet.json\n          </storage>\n\n          <entry>\n            fields_strip_underscores true\n            fields_lowercase true\n          </entry>\n        </source>\n\n        <source>\n          @type systemd\n          tag kernel\n          path /var/log/journal\n          matches [{ \"_TRANSPORT\": \"kernel\" }]\n          read_from_head true\n\n         
 <storage>\n            @type local\n            path /var/log/fluentd-systemd-kernel.json\n          </storage>\n\n          <entry>\n            fields_strip_underscores true\n            fields_lowercase true\n          </entry>\n        </source>\n\n        <match **>\n          @type relabel\n          @label @filter\n        </match>\n\n      filter: |\n        <label @FLUENT_LOG>\n          <match **>\n            @type null\n            @id ignore_fluent_logs\n          </match>\n        </label>\n\n        <label @filter>\n          <match kubernetes.var.log.containers.fluentd**>\n            @type relabel\n            @label @FLUENT_LOG\n          </match>\n\n          <filter kubernetes.**>\n            @type kubernetes_metadata\n          </filter>\n\n          <filter libvirt.**>\n            @type record_transformer\n            <record>\n              hostname \"#{ENV['NODE_NAME']}\"\n              fluentd_pod \"#{ENV['POD_NAME']}\"\n            </record>\n          </filter>\n          <match **>\n            @type relabel\n            @label @output\n          </match>\n        </label>\n      output: |\n        <label @output>\n          <match fluent.**>\n            @type null\n          </match>\n\n          <match **>\n            <buffer>\n              chunk_limit_size 2M\n              flush_interval 5s\n              flush_thread_count 8\n              queue_limit_length 32\n              retry_forever false\n              retry_max_interval 30\n            </buffer>\n            host \"#{ENV['ELASTICSEARCH_HOST']}\"\n            reload_connections false\n            reconnect_on_error true\n            reload_on_failure true\n            include_tag_key true\n            logstash_format true\n            password \"#{ENV['ELASTICSEARCH_PASSWORD']}\"\n            port \"#{ENV['ELASTICSEARCH_PORT']}\"\n            @type elasticsearch\n            user \"#{ENV['ELASTICSEARCH_USERNAME']}\"\n          </match>\n        </label>\nEOF\nhelm 
upgrade --install fluentd ${OSH_HELM_REPO}/fluentd \\\n    --namespace=osh-infra \\\n    --values=/tmp/fluentd.yaml \\\n  ${OSH_EXTRA_HELM_ARGS} \\\n  ${OSH_EXTRA_HELM_ARGS_FLUENTD}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods osh-infra\n"
  },
  {
    "path": "tools/deployment/logging/kibana.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_KIBANA:=\"$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c kibana ${FEATURES})\"}\n\n#NOTE: Deploy command\nhelm upgrade --install kibana ${OSH_HELM_REPO}/kibana \\\n  --namespace=osh-infra \\\n  --set network.kibana.ingress.classes.namespace=ingress-osh-infra \\\n  ${OSH_EXTRA_HELM_ARGS} \\\n  ${OSH_EXTRA_HELM_ARGS_KIBANA}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods osh-infra\n"
  },
  {
    "path": "tools/deployment/monitoring/alertmanager.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n\n#NOTE: Deploy command\nhelm upgrade --install prometheus-alertmanager ${OSH_HELM_REPO}/prometheus-alertmanager \\\n    --namespace=osh-infra \\\n    ${VOLUME_HELM_ARGS:=\"--set storage.alertmanager.enabled=false --set storage.alertmanager.use_local_path.enabled=true\"} \\\n    --set pod.replicas.alertmanager=1\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods osh-infra\n"
  },
  {
    "path": "tools/deployment/monitoring/blackbox-exporter.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n\n#NOTE: Deploy command\nhelm upgrade --install prometheus-blackbox-exporter \\\n    ${OSH_HELM_REPO}/prometheus-blackbox-exporter --namespace=osh-infra\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods osh-infra\n"
  },
  {
    "path": "tools/deployment/monitoring/grafana.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\nFEATURES=\"calico ceph containers coredns elasticsearch kubernetes nginx nodes openstack prometheus home_dashboard persistentvolume apparmor ${FEATURES}\"\n: ${OSH_EXTRA_HELM_ARGS_GRAFANA:=$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c grafana ${FEATURES} 2>/dev/null)}\n\n#NOTE: Deploy command\nhelm upgrade --install grafana ${OSH_HELM_REPO}/grafana \\\n  --namespace=osh-infra \\\n  --set network.grafana.ingress.classes.namespace=\"ingress-osh-infra\" \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_GRAFANA}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods osh-infra\n\n# Delete the test pod if it still exists\nkubectl delete pods -l application=grafana,release_group=grafana,component=test --namespace=osh-infra --ignore-not-found\nhelm test grafana --namespace osh-infra\n"
  },
  {
    "path": "tools/deployment/monitoring/kube-state-metrics.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_KUBE_STATE_METRICS:=\"$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c prometheus-kube-state-metrics ${FEATURES})\"}\n\n#NOTE: Deploy command\nhelm upgrade --install prometheus-kube-state-metrics \\\n    ${OSH_HELM_REPO}/prometheus-kube-state-metrics --namespace=kube-system \\\n    ${OSH_EXTRA_HELM_ARGS_KUBE_STATE_METRICS}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods kube-system\n"
  },
  {
    "path": "tools/deployment/monitoring/mysql-exporter.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_MARIADB_MYSQL_EXPORTER:=\"$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c prometheus-mysql-exporter ${FEATURES})\"}\n\n#NOTE: Deploy command\nhelm upgrade --install prometheus-mysql-exporter ${OSH_HELM_REPO}/prometheus-mysql-exporter \\\n    --namespace=openstack \\\n    --wait \\\n    --timeout 900s \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_MARIADB_MYSQL_EXPORTER}\n\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n\nkubectl get pods --namespace=openstack -o wide\n"
  },
  {
    "path": "tools/deployment/monitoring/nagios.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_NAGIOS:=\"$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c nagios ${FEATURES})\"}\n\n#NOTE: Deploy command\nhelm upgrade --install nagios ${OSH_HELM_REPO}/nagios \\\n  --namespace=osh-infra \\\n  --set network.nagios.ingress.classes.namespace=ingress-osh-infra \\\n  ${OSH_EXTRA_HELM_ARGS:=} \\\n  ${OSH_EXTRA_HELM_ARGS_NAGIOS}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods osh-infra\n\n# Delete the test pod if it still exists\nkubectl delete pods -l application=nagios,release_group=nagios,component=test --namespace=osh-infra --ignore-not-found\nhelm test nagios --namespace osh-infra\n"
  },
  {
    "path": "tools/deployment/monitoring/node-exporter.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_NODE_EXPORTER:=\"$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c prometheus-node-exporter ${FEATURES})\"}\n\n#NOTE: Deploy command\nhelm upgrade --install prometheus-node-exporter \\\n    ${OSH_HELM_REPO}/prometheus-node-exporter --namespace=kube-system \\\n    ${OSH_EXTRA_HELM_ARGS_NODE_EXPORTER}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods kube-system\n"
  },
  {
    "path": "tools/deployment/monitoring/node-problem-detector.sh",
    "content": "#!/bin/bash\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n\n#NOTE: Deploy command\ntee /tmp/kubernetes-node-problem-detector.yaml << EOF\nmonitoring:\n  prometheus:\n    pod:\n      enabled: false\n    service:\n      enabled: true\nmanifests:\n  service: true\nEOF\nhelm upgrade --install kubernetes-node-problem-detector \\\n    ${OSH_HELM_REPO}/kubernetes-node-problem-detector --namespace=kube-system \\\n    --values=/tmp/kubernetes-node-problem-detector.yaml\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods kube-system\n"
  },
  {
    "path": "tools/deployment/monitoring/openstack-exporter.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\n\nset -xe\n\n# Check if Keystone API DNS and HTTP endpoint are available; skip deployment if not\nKEYSTONE_HOST=\"keystone.openstack-helm.org\"\nKEYSTONE_PORT=80\nKEYSTONE_URL=\"http://$KEYSTONE_HOST:$KEYSTONE_PORT/v3\"\nTIMEOUT=${TIMEOUT:-60}\nINTERVAL=2\nstart_time=$(date +%s)\n\n# DNS check\nwhile ! getent hosts \"$KEYSTONE_HOST\" >/dev/null; do\n  now=$(date +%s)\n  elapsed=$((now - start_time))\n  if [ $elapsed -ge $TIMEOUT ]; then\n    echo \"[INFO] Keystone API DNS not found after $TIMEOUT seconds, skipping prometheus-openstack-exporter deployment.\"\n    exit 0\n  fi\n  echo \"[INFO] Waiting for Keystone DNS... ($elapsed/$TIMEOUT)\"\n  sleep $INTERVAL\ndone\n\n# HTTP check\nwhile ! curl -sf \"$KEYSTONE_URL\" >/dev/null; do\n  now=$(date +%s)\n  elapsed=$((now - start_time))\n  if [ $elapsed -ge $TIMEOUT ]; then\n    echo \"[INFO] Keystone API not responding after $TIMEOUT seconds, skipping prometheus-openstack-exporter deployment.\"\n    exit 0\n  fi\n  echo \"[INFO] Waiting for Keystone API... ($elapsed/$TIMEOUT)\"\n  sleep $INTERVAL\ndone\n\necho \"[INFO] Keystone API is available. 
Proceeding with exporter deployment.\"\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_OS_EXPORTER:=\"$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c prometheus-openstack-exporter ${FEATURES})\"}\n\n#NOTE: Deploy command\nhelm upgrade --install prometheus-openstack-exporter \\\n    ${OSH_HELM_REPO}/prometheus-openstack-exporter \\\n    --namespace=openstack \\\n    ${OSH_EXTRA_HELM_ARGS_OS_EXPORTER}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods openstack\n"
  },
  {
    "path": "tools/deployment/monitoring/process-exporter.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_PROCESS_EXPORTER:=\"$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c prometheus-process-exporter ${FEATURES})\"}\n\n#NOTE: Deploy command\nhelm upgrade --install prometheus-process-exporter \\\n    ${OSH_HELM_REPO}/prometheus-process-exporter --namespace=kube-system \\\n    ${OSH_EXTRA_HELM_ARGS_PROCESS_EXPORTER}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods kube-system\n"
  },
  {
    "path": "tools/deployment/monitoring/prometheus.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\nFEATURES=\"alertmanager ceph elasticsearch kubernetes nodes openstack postgresql apparmor ${FEATURES}\"\n: ${OSH_EXTRA_HELM_ARGS_PROMETHEUS:=\"$(helm osh get-values-overrides -p ${OSH_VALUES_OVERRIDES_PATH} -c prometheus ${FEATURES})\"}\n\n\n#NOTE: Deploy command\nhelm upgrade --install prometheus ${OSH_HELM_REPO}/prometheus \\\n    --namespace=osh-infra \\\n    --set network.prometheus.ingress.classes.namespace=ingress-osh-infra \\\n    ${VOLUME_HELM_ARGS:=\"--set storage.enabled=false --set storage.use_local_path.enabled=true\"} \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_PROMETHEUS}\n\n#NOTE: Wait for deploy\nhelm osh wait-for-pods osh-infra\n\n# Delete the test pod if it still exists\nkubectl delete pods -l application=prometheus,release_group=prometheus,component=test --namespace=osh-infra --ignore-not-found\nhelm test prometheus --namespace osh-infra\n"
  },
  {
    "path": "tools/deployment/openstack/keystone.sh",
    "content": "#!/bin/bash\n\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nset -xe\n\n: ${OSH_HELM_REPO:=\"../openstack-helm\"}\n: ${OSH_VALUES_OVERRIDES_PATH:=\"../openstack-helm/values_overrides\"}\n: ${OSH_EXTRA_HELM_ARGS_KEYSTONE:=\"$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c keystone ${FEATURES})\"}\n\n# Install Keystone\nhelm upgrade --install keystone ${OSH_HELM_REPO}/keystone \\\n    --namespace=openstack \\\n    ${OSH_EXTRA_HELM_ARGS:=} \\\n    ${OSH_EXTRA_HELM_ARGS_KEYSTONE}\n\nhelm osh wait-for-pods openstack\n\n# Testing basic functionality\nexport OS_CLOUD=openstack_helm\nsleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx\nopenstack endpoint list\n"
  },
  {
    "path": "tools/gate/selenium/grafana-selenium.sh",
    "content": "#!/bin/bash\n\nset -xe\n\nexport CHROMEDRIVER=\"${CHROMEDRIVER:=\"/etc/selenium/chromedriver\"}\"\nexport ARTIFACTS_DIR=\"${ARTIFACTS_DIR:=\"/tmp/artifacts/\"}\"\n\nexport GRAFANA_USER=\"admin\"\nexport GRAFANA_PASSWORD=\"password\"\nexport GRAFANA_URI=\"grafana.osh-infra.svc.cluster.local\"\n\npython3 $(readlink -f $(dirname $0))/grafanaSelenium.py\n"
  },
  {
    "path": "tools/gate/selenium/grafanaSelenium.py",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#    http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\nfrom seleniumtester import SeleniumTester\n\nst = SeleniumTester('Grafana')\n\nusername = st.get_variable('GRAFANA_USER')\npassword = st.get_variable('GRAFANA_PASSWORD')\ngrafana_uri = st.get_variable('GRAFANA_URI')\ngrafana_url = 'http://{0}'.format(grafana_uri)\n\ntry:\n    st.logger.info('Attempting to connect to Grafana')\n    st.browser.get(grafana_url)\n    el = WebDriverWait(st.browser, 15).until(\n        EC.title_contains('Grafana')\n    )\n    st.logger.info('Connected to Grafana')\nexcept TimeoutException:\n    st.logger.critical('Timed out waiting to connect to Grafana')\n    st.browser.quit()\n    sys.exit(1)\n\nst.logger.info(\"Attempting to log into Grafana dashboard\")\ntry:\n    st.browser.find_element(By.NAME, 'user').send_keys(username)\n    st.browser.find_element(By.NAME, 'password').send_keys(password)\n    st.browser.find_element(By.CLASS_NAME, 'css-1mhnkuh').click()\n    st.logger.info(\"Successfully logged in to Grafana\")\nexcept NoSuchElementException:\n    st.logger.error(\"Failed to log in to Grafana\")\n    st.browser.quit()\n    sys.exit(1)\n\nst.browser.quit()\n"
  },
  {
    "path": "tools/gate/selenium/kibana-selenium.sh",
    "content": "#!/bin/bash\n\nset -xe\n\nexport CHROMEDRIVER=\"${CHROMEDRIVER:=\"/etc/selenium/chromedriver\"}\"\nexport ARTIFACTS_DIR=\"${ARTIFACTS_DIR:=\"/tmp/artifacts/\"}\"\n\nexport KIBANA_USER=\"admin\"\nexport KIBANA_PASSWORD=\"changeme\"\nexport KIBANA_URI=\"kibana.osh-infra.svc.cluster.local\"\n\nexport KERNEL_QUERY=\"discove?r_g=()&_a=(columns:!(_source),index:'kernel*',interval:auto,query:(language:kuery,query:''),sort:!('@timestamp',desc))\"\nexport JOURNAL_QUERY=\"discove?r_g=()&_a=(columns:!(_source),index:'journal*',interval:auto,query:(language:kuery,query:''),sort:!('@timestamp',desc))\"\nexport LOGSTASH_QUERY=\"discove?r_g=()&_a=(columns:!(_source),index:'logstash*',interval:auto,query:(language:kuery,query:''),sort:!('@timestamp',desc))\"\n\npython3 $(readlink -f $(dirname $0))/kibanaSelenium.py\n"
  },
  {
    "path": "tools/gate/selenium/kibanaSelenium.py",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#    http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom seleniumtester import SeleniumTester\n\nst = SeleniumTester('Kibana')\n\nusername = st.get_variable('KIBANA_USER')\npassword = st.get_variable('KIBANA_PASSWORD')\nkibana_uri = st.get_variable('KIBANA_URI')\nkibana_url = 'http://{0}:{1}@{2}'.format(username, password, kibana_uri)\n\ntry:\n    st.logger.info('Attempting to connect to Kibana')\n    st.browser.get(kibana_url)\n    el = WebDriverWait(st.browser, 45).until(\n        EC.title_contains('Kibana')\n    )\n    st.logger.info('Connected to Kibana')\nexcept TimeoutException:\n    st.logger.critical('Timed out waiting for Kibana')\n    st.browser.quit()\n    sys.exit(1)\n\nkernel_query = st.get_variable('KERNEL_QUERY')\njournal_query = st.get_variable('JOURNAL_QUERY')\nlogstash_query = st.get_variable('LOGSTASH_QUERY')\n\nqueries = [(kernel_query, 'Kernel'),\n           (journal_query, 'Journal'),\n           (logstash_query, 'Logstash')]\n\nfor query, name in queries:\n    retry = 3\n    while retry > 0:\n        query_url = '{}/app/kibana#/{}'.format(kibana_url, query)\n\n        try:\n            st.logger.info('Attempting to query {} index'.format(name))\n            st.browser.get(query_url)\n            
WebDriverWait(st.browser, 60).until(\n                EC.presence_of_element_located(\n                    (By.XPATH, '/html/body/div[2]/div/div/div/div[3]/'\n                    'discover-app/main/div/div[2]/div/div[2]/section[2]/'\n                    'doc-table/div/table/tbody/tr[1]/td[2]')\n                )\n            )\n            st.logger.info('{} index loaded successfully'.format(name))\n            st.take_screenshot('Kibana {} Index'.format(name))\n            retry = 0\n\n        except TimeoutException:\n            if retry > 1:\n                st.logger.warning('Timed out loading {} index'.format(name))\n            else:\n                st.logger.error('Could not load {} index'.format(name))\n\n        retry -= 1\n        if retry <= 0:\n            # Reset test condition\n            st.browser.get(kibana_url)\n\nst.browser.quit()\n"
  },
  {
    "path": "tools/gate/selenium/nagios-selenium.sh",
    "content": "#!/bin/bash\n\nset -xe\n\nexport CHROMEDRIVER=\"${CHROMEDRIVER:=\"/etc/selenium/chromedriver\"}\"\nexport ARTIFACTS_DIR=\"${ARTIFACTS_DIR:=\"/tmp/artifacts/\"}\"\n\nexport NAGIOS_USER=\"nagiosadmin\"\nexport NAGIOS_PASSWORD=\"password\"\nexport NAGIOS_URI=\"nagios.osh-infra.svc.cluster.local\"\n\npython3 $(readlink -f $(dirname $0))/nagiosSelenium.py\n"
  },
  {
    "path": "tools/gate/selenium/nagiosSelenium.py",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#    http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\nfrom seleniumtester import SeleniumTester\n\nst = SeleniumTester('Nagios')\n\nusername = st.get_variable('NAGIOS_USER')\npassword = st.get_variable('NAGIOS_PASSWORD')\nnagios_uri = st.get_variable('NAGIOS_URI')\nnagios_url = 'http://{0}:{1}@{2}'.format(username, password, nagios_uri)\n\ntry:\n    st.logger.info('Attempting to connect to Nagios')\n    st.browser.get(nagios_url)\n    el = WebDriverWait(st.browser, 15).until(\n        EC.title_contains('Nagios')\n    )\n    st.logger.info('Connected to Nagios')\nexcept TimeoutException:\n    st.logger.critical('Timed out waiting for Nagios')\n    st.browser.quit()\n    sys.exit(1)\n\ntry:\n    st.logger.info('Switching Focus to Navigation side frame')\n    sideFrame = st.browser.switch_to.frame('side')\nexcept NoSuchElementException:\n    st.logger.error('Failed selecting side frame')\n    st.browser.quit()\n    sys.exit(1)\n\ntry:\n    st.logger.info('Attempting to visit Services page')\n    st.click_link_by_name('Services')\n    st.take_screenshot('Nagios Services')\nexcept TimeoutException:\n    st.logger.error('Failed to load Services page')\n    st.browser.quit()\n    sys.exit(1)\n\ntry:\n    
st.logger.info('Attempting to visit Host Groups page')\n    st.click_link_by_name('Host Groups')\n    st.take_screenshot('Nagios Host Groups')\nexcept TimeoutException:\n    st.logger.error('Failed to load Host Groups page')\n    st.browser.quit()\n    sys.exit(1)\n\ntry:\n    st.logger.info('Attempting to visit Hosts page')\n    st.click_link_by_name('Hosts')\n    st.take_screenshot('Nagios Hosts')\nexcept TimeoutException:\n    st.logger.error('Failed to load Hosts page')\n    st.browser.quit()\n    sys.exit(1)\n\nst.browser.quit()\n"
  },
  {
    "path": "tools/gate/selenium/prometheus-selenium.sh",
    "content": "#!/bin/bash\n\nset -xe\n\nexport CHROMEDRIVER=\"${CHROMEDRIVER:=\"/etc/selenium/chromedriver\"}\"\nexport ARTIFACTS_DIR=\"${ARTIFACTS_DIR:=\"/tmp/artifacts/\"}\"\n\nexport PROMETHEUS_USER=\"admin\"\nexport PROMETHEUS_PASSWORD=\"changeme\"\nexport PROMETHEUS_URI=\"prometheus.osh-infra.svc.cluster.local\"\n\npython3 tools/gate/selenium/prometheusSelenium.py\n"
  },
  {
    "path": "tools/gate/selenium/prometheusSelenium.py",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#    http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom seleniumtester import SeleniumTester\n\nst = SeleniumTester('Prometheus')\n\nusername = st.get_variable('PROMETHEUS_USER')\npassword = st.get_variable('PROMETHEUS_PASSWORD')\nprometheus_uri = st.get_variable('PROMETHEUS_URI')\nprometheus_url = 'http://{}:{}@{}'.format(username, password, prometheus_uri)\n\ntry:\n    st.logger.info('Attempting to connect to Prometheus')\n    st.browser.get(prometheus_url)\n    el = WebDriverWait(st.browser, 15).until(\n        EC.title_contains('Prometheus')\n    )\n    st.logger.info('Connected to Prometheus')\n    st.take_screenshot('Prometheus Dashboard')\nexcept TimeoutException:\n    st.logger.critical('Timed out waiting for Prometheus')\n    st.browser.quit()\n    sys.exit(1)\n\ntry:\n    st.logger.info('Attempting to view Runtime Information')\n    st.click_link_by_name('Status')\n    st.click_link_by_name('Runtime & Build Information')\n    el = WebDriverWait(st.browser, 15).until(\n        EC.presence_of_element_located((By.XPATH, '/html/body/div/table[1]'))\n    )\n    st.take_screenshot('Prometheus Runtime Info')\nexcept TimeoutException:\n    st.logger.error('Failed to load Runtime Information page')\n    
st.browser.quit()\n    sys.exit(1)\n\ntry:\n    st.logger.info('Attempting to view Command-Line Flags')\n    st.click_link_by_name('Status')\n    st.click_link_by_name('Command-Line Flags')\n    el = WebDriverWait(st.browser, 15).until(\n        EC.presence_of_element_located((By.XPATH, '/html/body/div/table'))\n    )\n    st.take_screenshot('Prometheus Command Line Flags')\nexcept TimeoutException:\n    st.logger.error('Failed to load Command Line Flags page')\n    st.browser.quit()\n    sys.exit(1)\n\nst.browser.quit()\n"
  },
  {
    "path": "tools/gate/selenium/seleniumtester.py",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#    http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport logging\nimport sys\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import ScreenshotException\n\nclass SeleniumTester():\n    def __init__(self, name):\n        self.logger = self.get_logger(name)\n        self.chrome_driver = self.get_variable('CHROMEDRIVER')\n        self.artifacts_dir = self.get_variable('ARTIFACTS_DIR')\n        self.initialize_artifiacts_dir()\n        self.browser = self.get_browser()\n\n    def get_logger(self, name):\n        logger = logging.getLogger('{} Selenium Tests'.format(name))\n        logger.setLevel(logging.DEBUG)\n        ch = logging.StreamHandler()\n        ch.setLevel(logging.DEBUG)\n        formatter = logging.Formatter(\n            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n        )\n\n        # Set the formatter and add the handler\n        ch.setFormatter(formatter)\n        logger.addHandler(ch)\n        return logger\n\n    def get_variable(self, env_var):\n        if env_var in os.environ:\n            
self.logger.info('Found \"{}\"'.format(env_var))\n            return os.environ[env_var]\n        else:\n            self.logger.critical(\n                'Variable \"{}\" is not defined!'.format(env_var)\n            )\n            sys.exit(1)\n\n    def get_browser(self):\n        options = Options()\n        options.add_argument('--headless')\n        options.add_argument('--no-sandbox')\n        options.add_argument('--window-size=1920x1080')\n        service = Service(executable_path=self.chrome_driver)\n        browser = webdriver.Chrome(service=service, options=options)\n        return browser\n\n    def initialize_artifiacts_dir(self):\n        if self.artifacts_dir and not os.path.exists(self.artifacts_dir):\n            os.makedirs(self.artifacts_dir)\n            self.logger.info(\n                'Created {} for test artifacts'.format(self.artifacts_dir)\n            )\n\n    def click_link_by_name(self, link_name):\n        try:\n            el = WebDriverWait(self.browser, 15).until(\n                EC.presence_of_element_located((By.LINK_TEXT, link_name))\n            )\n            self.logger.info(\"Clicking '{}' link\".format(link_name))\n            link = self.browser.find_element(By.LINK_TEXT, link_name)\n            link.click()\n        except (TimeoutException, NoSuchElementException):\n            self.logger.error(\"Failed clicking '{}' link\".format(link_name))\n            self.browser.quit()\n            sys.exit(1)\n\n    def take_screenshot(self, page_name):\n        file_name = page_name.replace(' ', '_')\n        try:\n            el = WebDriverWait(self.browser, 15)\n            self.browser.save_screenshot(\n                '{}{}.png'.format(self.artifacts_dir, file_name)\n            )\n            self.logger.info(\n                \"Successfully captured {} screenshot\".format(page_name)\n            )\n        except ScreenshotException:\n            self.logger.error(\n                \"Failed to capture {} 
screenshot\".format(page_name)\n                )\n            self.browser.quit()\n            sys.exit(1)\n"
  },
  {
    "path": "tools/gate/selenium/skyline-selenium.sh",
    "content": "#!/bin/bash\n\nset -xe\n\nexport CHROMEDRIVER=\"${CHROMEDRIVER:=\"/etc/selenium/chromedriver\"}\"\nexport ARTIFACTS_DIR=\"${ARTIFACTS_DIR:=\"/tmp/artifacts/\"}\"\n\nexport SKYLINE_USER=\"admin\"\nexport SKYLINE_PASSWORD=\"password\"\nexport SKYLINE_URI=\"skyline.openstack-helm.org\"\n\npython3 $(readlink -f $(dirname $0))/skylineSelenium.py\n"
  },
  {
    "path": "tools/gate/selenium/skylineSelenium.py",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#    http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\nfrom seleniumtester import SeleniumTester\nimport time\n\nst = SeleniumTester('Skiline')\n\nusername = st.get_variable('SKYLINE_USER')\npassword = st.get_variable('SKYLINE_PASSWORD')\nskyline_uri = st.get_variable('SKYLINE_URI')\nlogin_url = 'http://{0}/auth/login'.format(skyline_uri)\noverview_url = 'http://{0}/base/overview'.format(skyline_uri)\n\ntry:\n    st.logger.info('Attempting to connect to Skyline')\n    st.browser.get(login_url)\n    el = WebDriverWait(st.browser, 15).until(\n        EC.title_contains('Cloud')\n    )\n    st.logger.info('Connected to Skyline')\nexcept TimeoutException:\n    st.logger.critical('Timed out waiting to connect to Skyline')\n    st.browser.quit()\n    sys.exit(1)\n\ntime.sleep(5)\nst.logger.info(\"Attempting to log into Skyline dashboard\")\ntry:\n    print(f\"Cookies before login: {st.browser.get_cookies()}\")\n    st.browser.find_element(By.ID, 'normal_login_domain').send_keys(username)\n    st.browser.find_element(By.ID, 'normal_login_password').send_keys(password)\n    st.browser.find_element(By.CLASS_NAME, 'login-form-button').click()\n    
st.logger.info(\"Submitted login form\")\n    time.sleep(5)\n    st.logger.info(f\"Current url: {st.browser.current_url}\")\n    for cookie in st.browser.get_cookies():\n        if cookie['name'] == 'session':\n            st.logger.info(f\"Session cookie: {cookie['name']} = {cookie['value']}\")\n            st.logger.info('Successfully logged in to Skyline')\nexcept NoSuchElementException:\n    st.logger.error(\"Failed to log in to Skyline\")\n    st.browser.quit()\n    sys.exit(1)\n\nst.browser.quit()\n"
  },
  {
    "path": "tox.ini",
    "content": "[tox]\nminversion = 3.1\nenvlist = docs\nskipsdist = True\nignore_basepython_conflict = True\n\n[testenv]\nbasepython = python3\nsetenv = VIRTUAL_ENV={envdir}\ndeps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}\npassenv = *_proxy,*_PROXY\n\n[testenv:venv]\ncommands = {posargs}\n\n[testenv:linters]\ndeps = pre-commit\nallowlist_externals = pre-commit\ncommands = pre-commit run --all-files --show-diff-on-failure {posargs}\n\n[testenv:docs]\ndeps =\n  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}\n  -r{toxinidir}/doc/requirements.txt\ncommands =\n  rm -rf doc/build\n  make helm-docs\n  sphinx-build -W --keep-going -b html -j auto doc/source doc/build/html\nallowlist_externals =\n  make\n  rm\n\n; NOTE(kozhukalov): Temporarily disable the pdf generation because\n;                   it is broken after merging the openstack-helm-infra.\n;                   It is likely due to long lines.\n; [testenv:pdf-docs]\n; envdir = {toxworkdir}/docs\n; deps = {[testenv:docs]deps}\n; allowlist_externals =\n;   make\n;   rm\n; commands =\n;   rm -rf doc/build/pdf\n;   make helm-docs\n;   sphinx-build -W --keep-going -b latex -j auto doc/source doc/build/pdf\n;   make -C doc/build/pdf\n\n[testenv:releasenotes]\ndeps = -r{toxinidir}/releasenotes/requirements.txt\ncommands = sphinx-build -a -W -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html\n"
  },
  {
    "path": "trove/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Trove\nname: trove\nversion: 2025.2.0\nhome: https://docs.openstack.org/trove/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Trove/OpenStack_Project_Trove_vertical.png\nsources:\n  - https://opendev.org/openstack/trove\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "trove/templates/bin/_db-purge.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\n# Purge deleted instances older than 30 days\ntrove-manage --config-file /etc/trove/trove.conf db_purge --age_in_days 30\n"
  },
  {
    "path": "trove/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\ntrove-manage --config-file /etc/trove/trove.conf db_sync\n"
  },
  {
    "path": "trove/templates/bin/_trove-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec trove-api \\\n  --config-file /etc/trove/trove.conf\n"
  },
  {
    "path": "trove/templates/bin/_trove-conductor.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec trove-conductor \\\n  --config-file /etc/trove/trove.conf\n"
  },
  {
    "path": "trove/templates/bin/_trove-taskmanager.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nexec trove-taskmanager \\\n  --config-file /etc/trove/trove.conf\n"
  },
  {
    "path": "trove/templates/certificates.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.certificates }}\n{{- $envAll := . }}\n{{- $endpoint := \"database\" }}\n{{- range $key1, $cert := tuple \"public\" \"internal\" }}\n{{- $endpointScheme := tuple $endpoint \"service\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" }}\n{{- if eq $endpointScheme \"https\" }}\n{{- $certName := index $envAll.Values.secrets.tls $endpoint \"api\" $cert }}\n{{- $endpointHost := index $envAll.Values.endpoints $endpoint \"host_fqdn_override\" $cert \"host\" }}\n{{- $endpointClusterHostname := tuple $endpoint $cert $envAll | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\n{{- $endpointHostname := $endpointClusterHostname }}\n{{- if $endpointHost }}\n{{- $endpointHostname = $endpointHost }}\n{{- end }}\n---\napiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\n  name: {{ $certName }}\nspec:\n  secretName: {{ $certName }}\n  issuerRef:\n    name: {{ index $envAll.Values.endpoints $endpoint \"host_fqdn_override\" $cert \"tls\" \"issuerRef\" \"name\" }}\n    kind: {{ index $envAll.Values.endpoints $endpoint \"host_fqdn_override\" $cert \"tls\" \"issuerRef\" \"kind\" }}\n  commonName: {{ $endpointHostname }}\n  dnsNames:\n    - {{ $endpointHostname }}\n    - {{ $endpointClusterHostname }}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $rallyTests := .Values.conf.rally_tests }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: trove-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  rally-test.sh: |\n{{ tuple $rallyTests | include \"helm-toolkit.scripts.rally_test\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  db-purge.sh: |\n{{ tuple \"bin/_db-purge.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  trove-api.sh: |\n{{ tuple \"bin/_trove-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  trove-conductor.sh: |\n{{ tuple \"bin/_trove-conductor.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  trove-taskmanager.sh: |\n{{ tuple \"bin/_trove-taskmanager.sh.tpl\" . 
| include \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.trove.database.connection)) (empty .Values.conf.trove.database.connection) }}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"trove\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\"| set .Values.conf.trove.database \"connection\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.trove.DEFAULT.transport_url }}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"trove\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_transport_endpoint_uri_lookup\" | set .Values.conf.trove.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.trove.DEFAULT.trove_auth_url }}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.trove.DEFAULT \"trove_auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.trove.DEFAULT.nova_compute_url }}\n{{- $_ := tuple \"compute\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.trove.DEFAULT \"nova_compute_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.trove.DEFAULT.neutron_url }}\n{{- $_ := tuple \"network\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.trove.DEFAULT \"neutron_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.trove.DEFAULT.cinder_url }}\n{{- $_ := tuple \"volumev3\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.trove.DEFAULT \"cinder_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.trove.DEFAULT.glance_url }}\n{{- $_ := tuple \"image\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | set .Values.conf.trove.DEFAULT \"glance_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.trove.keystone_authtoken.auth_uri }}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.trove.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.trove.keystone_authtoken.auth_url }}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.trove.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.trove.keystone_authtoken.region_name }}\n{{- $_ := set .Values.conf.trove.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.trove.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.trove.keystone_authtoken.project_name }}\n{{- $_ := set .Values.conf.trove.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.trove.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.trove.keystone_authtoken.project_domain_name }}\n{{- $_ := set .Values.conf.trove.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.trove.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.trove.keystone_authtoken.user_domain_name }}\n{{- $_ := set .Values.conf.trove.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.trove.user_domain_name -}}\n{{- end -}}\n{{- if empty 
.Values.conf.trove.keystone_authtoken.username }}\n{{- $_ := set .Values.conf.trove.keystone_authtoken \"username\" .Values.endpoints.identity.auth.trove.username -}}\n{{- end -}}\n{{- if empty .Values.conf.trove.keystone_authtoken.password }}\n{{- $_ := set .Values.conf.trove.keystone_authtoken \"password\" .Values.endpoints.identity.auth.trove.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.trove.keystone_authtoken.memcached_servers }}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.trove.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.trove.keystone_authtoken.memcache_secret_key }}\n{{- $_ := set .Values.conf.trove.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if empty .Values.conf.trove.service_credentials.auth_url }}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.trove.service_credentials \"auth_url\" -}}\n{{- end -}}\n{{- if empty .Values.conf.trove.service_credentials.region_name }}\n{{- $_ := set .Values.conf.trove.service_credentials \"region_name\" .Values.endpoints.identity.auth.trove.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.trove.service_credentials.project_name }}\n{{- $_ := set .Values.conf.trove.service_credentials \"project_name\" .Values.endpoints.identity.auth.trove.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.trove.service_credentials.project_domain_name }}\n{{- $_ := set .Values.conf.trove.service_credentials \"project_domain_name\" .Values.endpoints.identity.auth.trove.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.trove.service_credentials.user_domain_name }}\n{{- $_ := set .Values.conf.trove.service_credentials \"user_domain_name\" .Values.endpoints.identity.auth.trove.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.trove.service_credentials.username }}\n{{- $_ := set .Values.conf.trove.service_credentials \"username\" .Values.endpoints.identity.auth.trove.username -}}\n{{- end -}}\n{{- if empty .Values.conf.trove.service_credentials.password }}\n{{- $_ := set .Values.conf.trove.service_credentials \"password\" .Values.endpoints.identity.auth.trove.password -}}\n{{- end -}}\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: trove-etc\ntype: Opaque\ndata:\n  trove.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.trove | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.paste | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/cron-job-trove-db-purge.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.cron_job_db_purge }}\n{{- $envAll := . }}\n\n{{- $mounts_trove_db_purge := .Values.pod.mounts.trove_db_purge.trove_db_purge }}\n{{- $mounts_trove_db_purge_init := .Values.pod.mounts.trove_db_purge.init_container }}\n\n{{- $serviceAccountName := \"trove-db-purge\" }}\n{{ tuple $envAll \"db_purge\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: trove-db-purge\nspec:\n  schedule: {{ .Values.jobs.db_purge.cron | quote }}\n  successfulJobsHistoryLimit: {{ .Values.jobs.db_purge.history.success }}\n  failedJobsHistoryLimit: {{ .Values.jobs.db_purge.history.failed }}\n  startingDeadlineSeconds: {{ .Values.jobs.db_purge.starting_deadline }}\n  concurrencyPolicy: Forbid\n  jobTemplate:\n    metadata:\n      labels:\n{{ tuple $envAll \"trove\" \"db-purge\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n    spec:\n      template:\n        metadata:\n          labels:\n{{ tuple $envAll \"trove\" \"db-purge\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n        spec:\n{{ tuple \"trove_db_purge\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 10 }}\n{{ tuple \"trove_db_purge\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 10 }}\n          serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"db_purge\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 10 }}\n          restartPolicy: OnFailure\n{{ if $envAll.Values.pod.tolerations.trove.enabled }}\n{{ tuple $envAll \"trove\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 10 }}\n{{ end }}\n          nodeSelector:\n            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}\n          initContainers:\n{{ tuple $envAll \"db_purge\" $mounts_trove_db_purge_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 12 }}\n          containers:\n            - name: trove-db-purge\n{{ tuple $envAll \"trove_db_purge\" | include \"helm-toolkit.snippets.image\" | indent 14 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.db_purge | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 14 }}\n{{ dict \"envAll\" $envAll \"application\" \"db_purge\" \"container\" \"trove_db_purge\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 14 }}\n              command:\n                - /tmp/db-purge.sh\n              env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.trove }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 16 }}\n{{- end }}\n              volumeMounts:\n                - name: pod-tmp\n                  mountPath: /tmp\n                - name: trove-bin\n                  mountPath: /tmp/db-purge.sh\n                  subPath: db-purge.sh\n                  readOnly: true\n                - name: trove-etc\n                  mountPath: /etc/trove/trove.conf\n                  subPath: trove.conf\n                  readOnly: true\n                - name: trove-etc\n                  mountPath: {{ 
.Values.conf.trove.DEFAULT.log_config_append }}\n                  subPath: {{ base .Values.conf.trove.DEFAULT.log_config_append }}\n                  readOnly: true\n{{ if $mounts_trove_db_purge.volumeMounts }}{{ toYaml $mounts_trove_db_purge.volumeMounts | indent 16 }}{{ end }}\n          volumes:\n            - name: pod-tmp\n              emptyDir: {}\n            - name: trove-bin\n              configMap:\n                name: trove-bin\n                defaultMode: 0555\n            - name: trove-etc\n              secret:\n                secretName: trove-etc\n                defaultMode: 0444\n{{ if $mounts_trove_db_purge.volumes }}{{ toYaml $mounts_trove_db_purge.volumes | indent 12 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"troveApiLivenessProbeTemplate\" }}\nhttpGet:\n  scheme: {{ tuple \"database\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: /\n  port: {{ tuple \"database\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- define \"troveApiReadinessProbeTemplate\" }}\nhttpGet:\n  scheme: {{ tuple \"database\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n  path: /\n  port: {{ tuple \"database\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . 
}}\n\n{{- $mounts_trove_api := .Values.pod.mounts.trove_api.trove_api }}\n{{- $mounts_trove_api_init := .Values.pod.mounts.trove_api.init_container }}\n\n{{- $serviceAccountName := \"trove-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: trove-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"trove\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"trove\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"trove\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"trove_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"trove-api\" \"containerNames\" (list \"trove-api\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"trove_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"trove_api\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"trove_api\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"trove\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ if $envAll.Values.pod.tolerations.trove.enabled }}\n{{ tuple $envAll \"trove\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_trove_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: trove-api\n{{ tuple $envAll \"trove_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"trove_api\" \"container\" \"trove_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.trove }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          command:\n            - /tmp/trove-api.sh\n            - start\n          ports:\n            - name: t-api\n              containerPort: {{ tuple \"database\" \"service\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" $envAll  \"component\" \"api\" \"container\" \"trove-api\" \"type\" \"liveness\" \"probeTemplate\" (include \"troveApiLivenessProbeTemplate\" $envAll  | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll  \"component\" \"api\" \"container\" \"trove-api\" \"type\" \"readiness\" \"probeTemplate\" (include \"troveApiReadinessProbeTemplate\" $envAll  | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.trove.oslo_concurrency.lock_path }}\n            - name: trove-bin\n              mountPath: /tmp/trove-api.sh\n              subPath: trove-api.sh\n              readOnly: true\n            - name: trove-etc\n              mountPath: /etc/trove/trove.conf\n              subPath: trove.conf\n              readOnly: true\n            - name: trove-etc\n              mountPath: {{ .Values.conf.trove.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.trove.DEFAULT.log_config_append }}\n              readOnly: true\n            - name: trove-etc\n              mountPath: /etc/trove/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: trove-etc\n              mountPath: /etc/trove/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{ if $mounts_trove_api.volumeMounts }}{{ toYaml $mounts_trove_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: trove-bin\n          configMap:\n            name: trove-bin\n            defaultMode: 0555\n        - name: trove-etc\n          secret:\n            secretName: trove-etc\n            
defaultMode: 0444\n{{ if $mounts_trove_api.volumes }}{{ toYaml $mounts_trove_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/deployment-conductor.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_conductor }}\n{{- $envAll := . }}\n\n{{- $mounts_trove_conductor := .Values.pod.mounts.trove_conductor.trove_conductor }}\n{{- $mounts_trove_conductor_init := .Values.pod.mounts.trove_conductor.init_container }}\n\n{{- $serviceAccountName := \"trove-conductor\" }}\n{{ tuple $envAll \"conductor\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: trove-conductor\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"trove\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.conductor }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"trove\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"trove\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"trove_conductor\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"trove-conductor\" \"containerNames\" (list \"trove-conductor\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"trove_conductor\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"trove_conductor\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"trove_conductor\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"trove\" \"conductor\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ if $envAll.Values.pod.tolerations.trove.enabled }}\n{{ tuple $envAll \"trove\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.conductor.node_selector_key }}: {{ .Values.labels.conductor.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"conductor\" $mounts_trove_conductor_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: trove-conductor\n{{ tuple $envAll \"trove_conductor\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.conductor | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"trove_conductor\" \"container\" \"trove_conductor\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n          env:\n{{- with $env := 
dict \"ksUserSecret\" .Values.secrets.identity.trove }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          command:\n            - /tmp/trove-conductor.sh\n            - start\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.trove.oslo_concurrency.lock_path }}\n            - name: trove-bin\n              mountPath: /tmp/trove-conductor.sh\n              subPath: trove-conductor.sh\n              readOnly: true\n            - name: trove-etc\n              mountPath: /etc/trove/trove.conf\n              subPath: trove.conf\n              readOnly: true\n            - name: trove-etc\n              mountPath: {{ .Values.conf.trove.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.trove.DEFAULT.log_config_append }}\n              readOnly: true\n            - name: trove-etc\n              mountPath: /etc/trove/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{ if $mounts_trove_conductor.volumeMounts }}{{ toYaml $mounts_trove_conductor.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: trove-bin\n          configMap:\n            name: trove-bin\n            defaultMode: 0555\n        - name: trove-etc\n          secret:\n            secretName: trove-etc\n            defaultMode: 0444\n{{ if $mounts_trove_conductor.volumes }}{{ toYaml $mounts_trove_conductor.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/deployment-taskmanager.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_taskmanager }}\n{{- $envAll := . }}\n\n{{- $mounts_trove_taskmanager := .Values.pod.mounts.trove_taskmanager.trove_taskmanager }}\n{{- $mounts_trove_taskmanager_init := .Values.pod.mounts.trove_taskmanager.init_container }}\n\n{{- $serviceAccountName := \"trove-taskmanager\" }}\n{{ tuple $envAll \"taskmanager\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: trove-taskmanager\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"trove\" \"taskmanager\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.taskmanager }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"trove\" \"taskmanager\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"trove\" \"taskmanager\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . 
| include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"trove_taskmanager\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"trove-taskmanager\" \"containerNames\" (list \"trove-taskmanager\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"trove_taskmanager\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"trove_taskmanager\" . | include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"trove_taskmanager\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"trove\" \"taskmanager\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n{{ if $envAll.Values.pod.tolerations.trove.enabled }}\n{{ tuple $envAll \"trove\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      nodeSelector:\n        {{ .Values.labels.taskmanager.node_selector_key }}: {{ .Values.labels.taskmanager.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"taskmanager\" $mounts_trove_taskmanager_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: trove-taskmanager\n{{ tuple $envAll \"trove_taskmanager\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.taskmanager | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"trove_taskmanager\" \"container\" \"trove_taskmanager\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n  
        env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.trove }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 12 }}\n{{- end }}\n          command:\n            - /tmp/trove-taskmanager.sh\n            - start\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.trove.oslo_concurrency.lock_path }}\n            - name: trove-bin\n              mountPath: /tmp/trove-taskmanager.sh\n              subPath: trove-taskmanager.sh\n              readOnly: true\n            - name: trove-etc\n              mountPath: /etc/trove/trove.conf\n              subPath: trove.conf\n              readOnly: true\n            - name: trove-etc\n              mountPath: {{ .Values.conf.trove.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.trove.DEFAULT.log_config_append }}\n              readOnly: true\n            - name: trove-etc\n              mountPath: /etc/trove/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{ if $mounts_trove_taskmanager.volumeMounts }}{{ toYaml $mounts_trove_taskmanager.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: trove-bin\n          configMap:\n            name: trove-bin\n            defaultMode: 0555\n        - name: trove-etc\n          secret:\n            secretName: trove-etc\n            defaultMode: 0444\n{{ if $mounts_trove_taskmanager.volumes }}{{ toYaml $mounts_trove_taskmanager.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "trove/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" . \"backendService\" \"api\" \"backendServiceType\" \"database\" \"backendPort\" \"t-api\" -}}\n{{- if .Values.network.api.ingress.classes -}}\n{{- $_ := set $ingressOpts \"ingressClassName\" ( index .Values.network.api.ingress.classes .Release.Namespace | default ( index .Values.network.api.ingress.classes \"cluster\" ) ) -}}\n{{- end -}}\n{{- $secretName := index $envAll.Values.secrets.tls.database.api ( $ingressOpts.backendService | replace \"-\" \"_\" ) -}}\n{{- if and .Values.manifests.certificates $secretName -}}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.database.host_fqdn_override.public.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"trove\" -}}\n{{- if .Values.pod.tolerations.trove.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"trove\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.trove.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"trove\" -}}\n{{- $_ := set $dbSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.trove.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"trove\" -}}\n{{- if .Values.pod.tolerations.trove.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksEndpointsJob := dict \"envAll\" . \"serviceName\" \"trove\" \"serviceTypes\" ( tuple \"database\" ) -}}\n{{- $_ := set $ksEndpointsJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.trove.enabled -}}\n{{- $_ := set $ksEndpointsJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksEndpointsJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"trove\" \"serviceTypes\" ( tuple \"database\" ) -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.trove.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"trove\" -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.trove.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.rabbit_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rabbitInitJob := dict \"envAll\" . \"serviceName\" \"trove\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $rabbitInitJob \"tlsSecret\" .Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $rabbitInitJob \"jobAnnotations\" (include \"metadata.annotations.job.rabbit_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.trove.enabled -}}\n{{- $_ := set $rabbitInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $rabbitInitJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $envAll := . }}\n---\napiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  name: trove-default\nspec:\n  podSelector:\n    matchLabels:\n{{ tuple $envAll \"trove\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  policyTypes:\n    - Ingress\n    - Egress\n  ingress:\n    - from:\n      - podSelector:\n          matchLabels:\n{{ tuple $envAll \"trove\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 12 }}\n      ports:\n{{ tuple \"database\" \"service\" \"api\" . | include \"helm-toolkit.manifests.network_policy_list\" | indent 8 }}\n  egress:\n    - to:\n      - namespaceSelector:\n          matchLabels:\n            name: kube-system\n    - to:\n      - namespaceSelector:\n          matchLabels:\n            name: {{ .Release.Namespace }}\n      ports:\n{{ tuple \"oslo_db\" \"internal\" \"mysql\" . | include \"helm-toolkit.manifests.network_policy_list\" | indent 8 }}\n{{ tuple \"oslo_messaging\" \"internal\" \"amqp\" . | include \"helm-toolkit.manifests.network_policy_list\" | indent 8 }}\n{{ tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.manifests.network_policy_list\" | indent 8 }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: trove-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"trove\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/pod-rally-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pod_rally_test }}\n{{- $envAll := . }}\n\n{{- $mounts_trove_tests := .Values.pod.mounts.trove_tests.trove_tests }}\n{{- $mounts_trove_tests_init := .Values.pod.mounts.trove_tests.init_container }}\n\n{{- $serviceAccountName := \"trove-tests\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"trove-tests\"\n  labels:\n{{ tuple $envAll \"trove\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": \"test\"\n    \"helm.sh/hook-weight\": \"10\"\nspec:\n{{ tuple \"trove_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 2 }}\n{{ tuple \"trove_tests\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 2 }}\n  restartPolicy: Never\n  serviceAccountName: {{ $serviceAccountName }}\n{{ if $envAll.Values.pod.tolerations.trove.enabled }}\n{{ tuple $envAll \"trove\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_trove_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: trove-tests\n{{ tuple $envAll \"test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: RALLY_ENV_NAME\n          value: {{.Release.Name | quote }}\n      command:\n        - /tmp/rally-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: trove-bin\n          mountPath: /tmp/rally-test.sh\n          subPath: rally-test.sh\n          readOnly: true\n        - name: rally-db\n          mountPath: /var/lib/rally\n{{ if $mounts_trove_tests.volumeMounts }}{{ toYaml $mounts_trove_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: trove-bin\n      configMap:\n        name: trove-bin\n        defaultMode: 0555\n    - name: rally-db\n      emptyDir: {}\n{{ if $mounts_trove_tests.volumes }}{{ toYaml $mounts_trove_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"trove\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- $userClassData := index $envAll.Values.endpoints.oslo_db.auth $userClass }}\n{{- if $userClassData.username }}\n  DB_CONNECTION: {{ tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{ include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"database\" ) }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"trove\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- $identityClass := index $envAll.Values.endpoints.identity.auth $userClass }}\n{{- if $identityClass.username }}\n  OS_AUTH_URL: {{ tuple \"identity\" \"internal\" \"api\" $envAll | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | b64enc }}\n  OS_REGION_NAME: {{ $identityClass.region_name | b64enc }}\n  OS_INTERFACE: {{ $identityClass.interface | default \"internal\" | b64enc }}\n  OS_ENDPOINT_TYPE: {{ $identityClass.interface | default \"internal\" | b64enc }}\n  OS_PROJECT_DOMAIN_NAME: {{ $identityClass.project_domain_name | b64enc }}\n  OS_PROJECT_NAME: {{ $identityClass.project_name | b64enc }}\n  OS_USER_DOMAIN_NAME: {{ $identityClass.user_domain_name | b64enc }}\n  OS_USERNAME: {{ $identityClass.username | b64enc }}\n  OS_PASSWORD: {{ $identityClass.password | b64enc }}\n  OS_DEFAULT_DOMAIN: {{ $identityClass.default_domain_id | default \"default\" | b64enc }}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- $rabbitmqProtocol := \"http\" }}\n{{- if $envAll.Values.manifests.certificates }}\n{{- $rabbitmqProtocol = \"https\" }}\n{{- end }}\n{{- range $key1, $userClass := tuple \"admin\" \"trove\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_messaging\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass $rabbitmqProtocol $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_registry }}\n{{- $envAll := . }}\n{{- if .Values.endpoints.oci_image_registry.auth.enabled }}\n{{- $secretName := .Values.secrets.oci_image_registry.trove }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: kubernetes.io/dockerconfigjson\ndata:\n  .dockerconfigjson: {{ include \"helm-toolkit.utils.imagePullSecret\" ( dict \"images\" ( list .Values.endpoints.oci_image_registry ) \"secret\" ( dict \"name\" $secretName \"namespace\" $envAll.Release.Namespace ) ) }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"database\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: t-api\n      port: {{ tuple \"database\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"trove\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{ end }}\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "trove/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_ingress_api }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendServiceType\" \"database\" \"backendPort\" \"t-api\" -}}\n{{- $secretName := $envAll.Values.secrets.tls.database.api.internal -}}\n{{- if and .Values.manifests.certificates $secretName -}}\n{{- $_ := set $ingressOpts \"certIssuer\" .Values.endpoints.database.host_fqdn_override.default.tls.issuerRef.name -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "trove/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for trove.\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  conductor:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  taskmanager:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nrelease_group: null\n\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    db_init: quay.io/airshipit/heat:2025.1-ubuntu_noble\n    trove_db_sync: quay.io/airshipit/trove:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/heat:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/heat:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/heat:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/heat:2025.1-ubuntu_noble\n    trove_api: quay.io/airshipit/trove:2025.1-ubuntu_noble\n    trove_conductor: quay.io/airshipit/trove:2025.1-ubuntu_noble\n    trove_taskmanager: quay.io/airshipit/trove:2025.1-ubuntu_noble\n    trove_db_purge: quay.io/airshipit/trove:2025.1-ubuntu_noble\n    dep_check: 
quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\njobs:\n  db_purge:\n    cron: \"0 0 * * *\"\n    starting_deadline: 600\n    history:\n      success: 3\n      failed: 1\n\npod:\n  probes:\n    api:\n      trove-api:\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 5\n            periodSeconds: 10\n            timeoutSeconds: 1\n            failureThreshold: 3\n            successThreshold: 1\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 30\n  security_context:\n    db_purge:\n      pod:\n        runAsUser: 42424\n      container:\n        trove_db_purge:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    trove_api:\n      pod:\n        runAsUser: 42424\n      container:\n        trove_api:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    trove_conductor:\n      pod:\n        runAsUser: 42424\n      container:\n        trove_conductor:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n    trove_taskmanager:\n      pod:\n        runAsUser: 42424\n      container:\n        trove_taskmanager:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    trove:\n      enabled: false\n      tolerations:\n      - key: node-role.kubernetes.io/master\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/control-plane\n        operator: Exists\n        effect: NoSchedule\n  mounts:\n    trove_api:\n      
init_container: null\n      trove_api:\n        volumeMounts:\n        volumes:\n    trove_conductor:\n      init_container: null\n      trove_conductor:\n        volumeMounts:\n        volumes:\n    trove_taskmanager:\n      init_container: null\n      trove_taskmanager:\n        volumeMounts:\n        volumes:\n    trove_db_purge:\n      init_container: null\n      trove_db_purge:\n        volumeMounts:\n        volumes:\n    trove_tests:\n      init_container: null\n      trove_tests:\n        volumeMounts:\n        volumes:\n    trove_db_sync:\n      trove_db_sync:\n        volumeMounts:\n        volumes:\n  replicas:\n    api: 1\n    conductor: 1\n    taskmanager: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    conductor:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    taskmanager:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n    
  db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_purge:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30878\n\nconf:\n  paste:\n    composite:trove:\n      use: call:trove.common.wsgi:versioned_urlmap\n      /: versions\n      /v1.0: troveapi\n    app:versions:\n      paste.app_factory: trove.versions:app_factory\n    pipeline:troveapi:\n      pipeline: http_proxy_to_wsgi faultwrapper authtoken authorization contextwrapper ratelimit extensions 
troveapp\n    filter:extensions:\n      paste.filter_factory: trove.common.extensions:factory\n    filter:authtoken:\n      paste.filter_factory: keystonemiddleware.auth_token:filter_factory\n    filter:authorization:\n      paste.filter_factory: trove.common.auth:AuthorizationMiddleware.factory\n    filter:contextwrapper:\n      paste.filter_factory: trove.common.wsgi:ContextMiddleware.factory\n    filter:faultwrapper:\n      paste.filter_factory: trove.common.wsgi:FaultWrapper.factory\n    filter:ratelimit:\n      paste.filter_factory: trove.common.limits:RateLimitingMiddleware.factory\n    filter:http_proxy_to_wsgi:\n      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory\n    app:troveapp:\n      paste.app_factory: trove.common.api:app_factory\n  policy: {}\n  trove:\n    DEFAULT:\n      log_config_append: /etc/trove/logging.conf\n      trove_api_workers: 4\n      transport_url: null\n      # control_exchange: trove\n      # ip_regex: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$\n      # black_list_regex: ^10\\.0\\.0\\.[0-9]+$\n      default_datastore: mysql\n      datastore_registry_ext: mysql:trove.guestagent.datastore.mysql.manager.Manager\n      trove_conductor_workers: 1\n      notification_service_id: mysql:2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b\n      os_region_name: RegionOne\n      # nova_compute_url: \"\"\n      # neutron_url: \"\"\n      # cinder_url: \"\"\n      # swift_url: \"\"\n      # glance_url: \"\"\n      # heat_url: \"\"\n      trove_volume_support: true\n      network_isolation: false\n    database:\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. 
when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    keystone_authtoken:\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n    service_credentials:\n      auth_url: null\n      region_name: RegionOne\n      interface: internal\n      auth_type: password\n    oslo_messaging_notifications:\n      driver: messagingv2\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: true\n    oslo_concurrency:\n      lock_path: /var/lock\n    oslo_policy:\n      policy_file: /etc/trove/policy.yaml\n  logging:\n    loggers:\n      keys:\n        - root\n        - trove\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: DEBUG\n      handlers:\n        - stdout\n    logger_trove:\n      level: DEBUG\n      handlers:\n        - stdout\n      qualname: trove\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d 
%H:%M:%S\"\n  rally_tests:\n    run_tempest: false\n    tests:\n      TroveInstances.create_and_delete_instance:\n        - runner:\n            type: constant\n            times: 1\n            concurrency: 1\n          context:\n            users:\n              tenants: 1\n              users_per_tenant: 1\n          args:\n            flavor_name: \"m1.tiny\"\n            volume_size: 1\n            databases:\n              - name: \"testdb\"\n            users:\n              - name: \"testuser\"\n                password: \"testpass\"\n                databases:\n                  - \"testdb\"\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - trove-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - trove-db-sync\n        - trove-ks-user\n        - trove-ks-endpoints\n        - trove-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n    conductor:\n      jobs:\n        - trove-db-sync\n        - trove-ks-user\n        - trove-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n    taskmanager:\n      jobs:\n        - trove-db-sync\n        - trove-ks-user\n        - trove-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: oslo_messaging\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - 
trove-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_endpoints:\n      jobs:\n        - trove-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n        - endpoint: internal\n          service: oslo_messaging\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n    tests:\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: database\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: trove-keystone-admin\n    trove: trove-keystone-user\n  oslo_db:\n    admin: trove-db-admin\n    trove: trove-db-user\n  oslo_messaging:\n    admin: trove-rabbitmq-admin\n    trove: trove-rabbitmq-user\n  tls:\n    database:\n      api:\n        public: trove-tls-public\n        internal: trove-tls-api\n  oci_image_registry:\n    trove: trove-oci-image-registry-key\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      trove:\n        username: trove\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  
identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      trove:\n        role: admin\n        region_name: RegionOne\n        username: trove\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      nova:\n        role: admin,service\n        region_name: RegionOne\n        username: nova\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      neutron:\n        role: admin,service\n        region_name: RegionOne\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n        username: neutron\n        password: password\n      cinder:\n        role: admin,service\n        region_name: RegionOne\n        username: cinder\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      glance:\n        role: admin,service\n        region_name: RegionOne\n        username: glance\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  database:\n    name: trove\n    hosts:\n      default: trove-api\n      public: trove\n    host_fqdn_override:\n      default: null\n      # NOTE: this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n   
 path:\n      default: /v1.0/%(tenant_id)s\n    scheme:\n      default: http\n    port:\n      api:\n        default: 8779\n        public: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      trove:\n        username: trove\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /trove\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n        secret:\n          tls:\n            internal: rabbitmq-tls-direct\n      trove:\n        username: trove\n        password: password\n    statefulset:\n      replicas: 2\n      name: rabbitmq-rabbitmq\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /trove\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  oslo_cache:\n    auth:\n      # NOTE: this is used to define the value for keystone\n      # authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n  compute:\n    name: nova\n    hosts:\n      default: nova-api\n      public: nova\n    host_fqdn_override:\n      default: null\n      # 
NOTE(portdirect): this chart supports TLS for fqdn over-ridden public\n      # endpoints using the following format:\n      # public:\n      #   host: null\n      #   tls:\n      #     crt: null\n      #     key: null\n    path:\n      default: \"/v2.1/\"\n    scheme:\n      default: 'http'\n      service: 'http'\n    port:\n      api:\n        default: 8774\n        public: 80\n        service: 8774\n      novncproxy:\n        default: 6080\n  network:\n    name: neutron\n    hosts:\n      default: neutron-server\n      public: neutron\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 9696\n        public: 80\n  volumev3:\n    name: cinderv3\n    hosts:\n      default: cinder-api\n      public: cinder\n    host_fqdn_override:\n      default: null\n    path:\n      default: '/v3'\n      healthcheck: /healthcheck\n    scheme:\n      default: http\n    port:\n      api:\n        default: 8776\n        public: 80\n  image:\n    name: glance\n    hosts:\n      default: glance-api\n      public: glance\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme:\n      default: http\n    port:\n      api:\n        default: 9292\n        public: 80\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  cron_job_db_purge: true\n  deployment_api: true\n  deployment_conductor: true\n  deployment_taskmanager: true\n  ingress_api: true\n  job_bootstrap: false\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_image_repo_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_rabbit_init: true\n  network_policy: false\n  pdb_api: true\n  pod_rally_test: true\n  secret_db: true\n  secret_ingress_tls: true\n  secret_keystone: true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_api: true\n  service_ingress_api: true\n# -- Array of extra K8s manifests 
to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "values_overrides/aodh/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    aodh_db_sync: quay.io/airshipit/aodh:2025.1-ubuntu_noble\n    aodh_api: quay.io/airshipit/aodh:2025.1-ubuntu_noble\n    aodh_evaluator: quay.io/airshipit/aodh:2025.1-ubuntu_noble\n    aodh_listener: quay.io/airshipit/aodh:2025.1-ubuntu_noble\n    aodh_notifier: quay.io/airshipit/aodh:2025.1-ubuntu_noble\n    aodh_alarms_cleaner: quay.io/airshipit/aodh:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/aodh/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    aodh_db_sync: quay.io/airshipit/aodh:2025.2-ubuntu_noble\n    aodh_api: quay.io/airshipit/aodh:2025.2-ubuntu_noble\n    aodh_evaluator: quay.io/airshipit/aodh:2025.2-ubuntu_noble\n    aodh_listener: quay.io/airshipit/aodh:2025.2-ubuntu_noble\n    aodh_notifier: quay.io/airshipit/aodh:2025.2-ubuntu_noble\n    aodh_alarms_cleaner: quay.io/airshipit/aodh:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/aodh/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    aodh_api:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      aodh:\n        custom.tld/key: \"value\"\n    tls:\n      alarming_api_public:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/aodh/gateway.yaml",
    "content": "# Gateway API overrides for Aodh.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  alarming:\n    host_fqdn_override:\n      public:\n        host: aodh.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: aodh-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.alarming.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: aodh-api\n              port: 8042\n...\n"
  },
  {
    "path": "values_overrides/aodh/mariadb-operator.yaml",
    "content": "---\nconf:\n  aodh:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  aodh_api:\n    - aodh-db-conn\n  aodh_db_sync:\n    - aodh-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: aodh\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: aodh\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: aodh-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: aodh-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"aodh\"\n      table: \"*\"\n      username: aodh\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: aodh-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: aodh\n      passwordSecretKeyRef:\n        name: aodh-db-password\n        key: password\n      database: aodh\n      secretName: aodh-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      
serviceName: mariadb\n\n...\n"
  },
  {
    "path": "values_overrides/barbican/2024.2-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    scripted_test: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    db_init: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    barbican_db_sync: \"quay.io/airshipit/barbican:2024.2-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_service: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    barbican_api: \"quay.io/airshipit/barbican:2024.2-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/barbican/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    scripted_test: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    barbican_db_sync: \"quay.io/airshipit/barbican:2025.1-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    barbican_api: \"quay.io/airshipit/barbican:2025.1-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/barbican/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    scripted_test: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    barbican_db_sync: \"quay.io/airshipit/barbican:2025.1-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    barbican_api: \"quay.io/airshipit/barbican:2025.1-ubuntu_noble\"\n...\n"
  },
  {
    "path": "values_overrides/barbican/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    scripted_test: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    barbican_db_sync: \"quay.io/airshipit/barbican:2025.2-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    barbican_api: \"quay.io/airshipit/barbican:2025.2-ubuntu_noble\"\n...\n"
  },
  {
    "path": "values_overrides/barbican/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    barbican_api:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      barbican:\n        custom.tld/key: \"value\"\n    tls:\n      key_manager_api_public:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/barbican/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    barbican:\n      container:\n        barbican_api:\n          appArmorProfile:\n            type: RuntimeDefault\n    test:\n      container:\n        barbican_test:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/barbican/gateway.yaml",
    "content": "# Gateway API overrides for Barbican.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  key_manager:\n    host_fqdn_override:\n      public:\n        host: barbican.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: barbican-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.key_manager.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: barbican-api\n              port: 9311\n...\n"
  },
  {
    "path": "values_overrides/barbican/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    scripted_test: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    barbican_db_sync: \"quay.io/airshipit/barbican:2025.1-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    barbican_api: \"quay.io/airshipit/barbican:2025.1-ubuntu_noble_loci\"\n...\n"
  },
  {
    "path": "values_overrides/barbican/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    scripted_test: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    barbican_db_sync: \"quay.io/airshipit/barbican:2025.2-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    barbican_api: \"quay.io/airshipit/barbican:2025.2-ubuntu_noble_loci\"\n...\n"
  },
  {
    "path": "values_overrides/barbican/mariadb-operator.yaml",
    "content": "---\nconf:\n  barbican:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  barbican_api:\n    - barbican-db-conn\n  barbican_db_sync:\n    - barbican-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: barbican\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: barbican\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: barbican-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: barbican-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"barbican\"\n      table: \"*\"\n      username: barbican\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: barbican-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: barbican\n      passwordSecretKeyRef:\n        name: barbican-db-password\n        key: password\n      database: barbican\n      secretName: barbican-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      
healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/barbican/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\n\nnetwork_policy:\n  barbican:\n    ingress:\n      - from:\n        - podSelector:\n            matchLabels:\n              application: barbican\n        - podSelector:\n            matchLabels:\n              application: ingress\n        - podSelector:\n            matchLabels:\n              application: horizon\n        - podSelector:\n            matchLabels:\n              application: heat\n        - podSelector:\n            matchLabels:\n              application: magnum\n        ports:\n        - protocol: TCP\n          port: 80\n        - protocol: TCP\n          port: 9311\n    egress:\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/32\n        ports:\n          - protocol: TCP\n            port: %%%REPLACE_API_PORT%%%\n...\n"
  },
  {
    "path": "values_overrides/barbican/tls-offloading.yaml",
    "content": "---\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      barbican:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n\ntls:\n  identity: true\n...\n"
  },
  {
    "path": "values_overrides/barbican/tls.yaml",
    "content": "---\nmanifests:\n  certificates: true\ntls:\n  identity: true\n  oslo_messaging: true\n  oslo_db: true\n...\n"
  },
  {
    "path": "values_overrides/blazar/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    blazar_api: quay.io/airshipit/blazar:2025.1-ubuntu_jammy\n    blazar_manager: quay.io/airshipit/blazar:2025.1-ubuntu_jammy\n    blazar_db_sync: quay.io/airshipit/blazar:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/blazar/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    blazar_api: quay.io/airshipit/blazar:2025.1-ubuntu_noble\n    blazar_manager: quay.io/airshipit/blazar:2025.1-ubuntu_noble\n    blazar_db_sync: quay.io/airshipit/blazar:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/blazar/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    blazar_api: quay.io/airshipit/blazar:2025.2-ubuntu_noble\n    blazar_manager: quay.io/airshipit/blazar:2025.2-ubuntu_noble\n    blazar_db_sync: quay.io/airshipit/blazar:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/blazar/gateway.yaml",
    "content": "# Gateway API overrides for Blazar.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  reservation:\n    host_fqdn_override:\n      public:\n        host: blazar.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: blazar-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.reservation.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: blazar-api\n              port: 1234\n...\n"
  },
  {
    "path": "values_overrides/blazar/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    blazar_api: quay.io/airshipit/blazar:2025.1-ubuntu_noble_loci\n    blazar_manager: quay.io/airshipit/blazar:2025.1-ubuntu_noble_loci\n    blazar_db_sync: quay.io/airshipit/blazar:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/blazar/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    blazar_api: quay.io/airshipit/blazar:2025.2-ubuntu_noble_loci\n    blazar_manager: quay.io/airshipit/blazar:2025.2-ubuntu_noble_loci\n    blazar_db_sync: quay.io/airshipit/blazar:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/blazar/mariadb-operator.yaml",
    "content": "---\nconf:\n  blazar:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  blazar_api:\n    - blazar-db-conn\n  blazar_db_sync:\n    - blazar-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: blazar\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: blazar\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: blazar-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: blazar-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"blazar\"\n      table: \"*\"\n      username: blazar\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: blazar-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: blazar\n      passwordSecretKeyRef:\n        name: blazar-db-password\n        key: password\n      database: blazar\n      secretName: blazar-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/ceilometer/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ceilometer_central: quay.io/airshipit/ceilometer:2025.1-ubuntu_noble\n    ceilometer_compute: quay.io/airshipit/ceilometer:2025.1-ubuntu_noble\n    ceilometer_ipmi: quay.io/airshipit/ceilometer:2025.1-ubuntu_noble\n    ceilometer_notification: quay.io/airshipit/ceilometer:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/ceilometer/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ceilometer_central: quay.io/airshipit/ceilometer:2025.2-ubuntu_noble\n    ceilometer_compute: quay.io/airshipit/ceilometer:2025.2-ubuntu_noble\n    ceilometer_ipmi: quay.io/airshipit/ceilometer:2025.2-ubuntu_noble\n    ceilometer_notification: quay.io/airshipit/ceilometer:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/ceilometer/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    ceilometer_compute:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      ceilometer:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/ceph-client/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    checkdns:\n      container:\n        checkdns:\n          appArmorProfile:\n            type: RuntimeDefault\n    mds:\n      container:\n        mds:\n          appArmorProfile:\n            type: RuntimeDefault\n        init_dirs:\n          appArmorProfile:\n            type: RuntimeDefault\n    rbd_pool:\n      container:\n        rbd_pool:\n          appArmorProfile:\n            type: RuntimeDefault\n    bootstrap:\n      container:\n        bootstrap:\n          appArmorProfile:\n            type: RuntimeDefault\n    test:\n      container:\n        test:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/ceph-mon/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    mon:\n      container:\n        ceph_mon:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_init_dirs:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_log_ownership:\n          appArmorProfile:\n            type: RuntimeDefault\n    mgr:\n      container:\n        mgr:\n          appArmorProfile:\n            type: RuntimeDefault\n        init_dirs:\n          appArmorProfile:\n            type: RuntimeDefault\n    moncheck:\n      container:\n        ceph_mon:\n          appArmorProfile:\n            type: RuntimeDefault\n    bootstrap:\n      container:\n        ceph_bootstrap:\n          appArmorProfile:\n            type: RuntimeDefault\n    storage_keys_generator:\n      container:\n        ceph_storage_keys_generator:\n          appArmorProfile:\n            type: RuntimeDefault\n    ceph:\n      container:\n        ceph_mon_keyring_generator:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_mgr_keyring_generator:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_mds_keyring_generator:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_osd_keyring_generator:\n          appArmorProfile:\n            type: RuntimeDefault\n    post_apply:\n      container:\n        ceph_mon_post_apply:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/ceph-osd/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    osd:\n      container:\n        osd_pod:\n          appArmorProfile:\n            type: RuntimeDefault\n        log_runner:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_init_dirs:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_log_ownership:\n          appArmorProfile:\n            type: RuntimeDefault\n        osd_init:\n          appArmorProfile:\n            type: RuntimeDefault\n    test:\n      container:\n        ceph_cluster_helm_test:\n          appArmorProfile:\n            type: RuntimeDefault\n    post_apply:\n      container:\n        ceph_osd_post_apply:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/ceph-provisioners/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    cephfs_client_key_generator:\n      container:\n        ceph_storage_keys_generator:\n          appArmorProfile:\n            type: RuntimeDefault\n    provisioner:\n      container:\n        ceph_rbd_provisioner:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_rbd_snapshotter:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_rbd_attacher:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_rbd_resizer:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_rbd_cephcsi:\n          appArmorProfile:\n            type: RuntimeDefault\n    test:\n      container:\n        test:\n          appArmorProfile:\n            type: RuntimeDefault\n    client_key_generator:\n      container:\n        ceph_storage_keys_generator:\n          appArmorProfile:\n            type: RuntimeDefault\n    plugin:\n      container:\n        ceph_rbd_registrar:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_csi_rbd_plugin:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/ceph-rgw/2024.2-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    ks_endpoints: 'quay.io/airshipit/heat:2024.2-ubuntu_jammy'\n    ks_service: 'quay.io/airshipit/heat:2024.2-ubuntu_jammy'\n    ks_user: 'quay.io/airshipit/heat:2024.2-ubuntu_jammy'\n...\n"
  },
  {
    "path": "values_overrides/ceph-rgw/2025.1-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    ks_endpoints: 'quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy'\n    ks_service: 'quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy'\n    ks_user: 'quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy'\n...\n"
  },
  {
    "path": "values_overrides/ceph-rgw/2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    ks_endpoints: 'quay.io/airshipit/openstack-client:2025.1-ubuntu_noble'\n    ks_service: 'quay.io/airshipit/openstack-client:2025.1-ubuntu_noble'\n    ks_user: 'quay.io/airshipit/openstack-client:2025.1-ubuntu_noble'\n...\n"
  },
  {
    "path": "values_overrides/ceph-rgw/2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    ks_endpoints: 'quay.io/airshipit/openstack-client:2025.2-ubuntu_noble'\n    ks_service: 'quay.io/airshipit/openstack-client:2025.2-ubuntu_noble'\n    ks_user: 'quay.io/airshipit/openstack-client:2025.2-ubuntu_noble'\n...\n"
  },
  {
    "path": "values_overrides/ceph-rgw/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    rgw:\n      container:\n        rgw:\n          appArmorProfile:\n            type: RuntimeDefault\n        init_dirs:\n          appArmorProfile:\n            type: RuntimeDefault\n        rgw_init:\n          appArmorProfile:\n            type: RuntimeDefault\n    bootstrap:\n      container:\n        bootstrap:\n          appArmorProfile:\n            type: RuntimeDefault\n        keyring_placement:\n          appArmorProfile:\n            type: RuntimeDefault\n    rgw_storage_init:\n      container:\n        rgw_storage_init:\n          appArmorProfile:\n            type: RuntimeDefault\n        keyring_placement:\n          appArmorProfile:\n            type: RuntimeDefault\n    rgw_s3_admin:\n      container:\n        create_s3_admin:\n          appArmorProfile:\n            type: RuntimeDefault\n        keyring_placement:\n          appArmorProfile:\n            type: RuntimeDefault\n    rgw_pool:\n      container:\n        rgw_pool:\n          appArmorProfile:\n            type: RuntimeDefault\n    rgw_test:\n      container:\n        ceph_rgw_ks_validation:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_rgw_s3_validation:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/ceph-rgw/gateway.yaml",
    "content": "# Gateway API overrides for Ceph RGW.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  ceph_object_store:\n    host_fqdn_override:\n      public:\n        host: ceph-rgw.openstack-helm.org\n\nmanifests:\n  ingress_rgw: false\n  service_ingress_rgw: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: ceph-rgw-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.ceph_object_store.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: ceph-rgw\n              port: 8088\n...\n"
  },
  {
    "path": "values_overrides/ceph-rgw/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    ks_endpoints: 'quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci'\n    ks_service: 'quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci'\n    ks_user: 'quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci'\n...\n"
  },
  {
    "path": "values_overrides/ceph-rgw/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    ks_endpoints: 'quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci'\n    ks_service: 'quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci'\n    ks_user: 'quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci'\n...\n"
  },
  {
    "path": "values_overrides/ceph-rgw/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\nnetwork_policy:\n  rgw:\n    egress:\n      - to:\n        - ipBlock:\n            cidr: 172.17.0.1/16\n      - to:\n        ports:\n          - protocol: TCP\n            port: 80\n          - protocol: TCP\n            port: 443\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/32\n        ports:\n          - protocol: TCP\n            port: %%%REPLACE_API_PORT%%%\n...\n"
  },
  {
    "path": "values_overrides/ceph-rgw/tls.yaml",
    "content": "---\nendpoints:\n  object_store:\n    scheme:\n      default: https\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: ceph-rgw-ks-tls-api\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n  ceph_object_store:\n    scheme:\n      default: https\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: ceph-rgw-s3-tls-api\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"nginx\"\n        cluster: \"nginx-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        nginx.ingress.kubernetes.io/proxy-body-size: \"0\"\n        nginx.ingress.kubernetes.io/proxy-max-temp-file-size: \"0\"\n        nginx.ingress.kubernetes.io/backend-protocol: \"HTTPS\"\n      external_policy_local: false\n      node_port:\n        enabled: false\n        port: 30004\n    public: 192.168.0.0/16\n    cluster: 192.168.0.0/16\n\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/cinder/2024.2-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    db_init: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    cinder_db_sync: \"quay.io/airshipit/cinder:2024.2-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_service: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    cinder_api: \"quay.io/airshipit/cinder:2024.2-ubuntu_jammy\"\n    bootstrap: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    cinder_scheduler: \"quay.io/airshipit/cinder:2024.2-ubuntu_jammy\"\n    cinder_volume: \"quay.io/airshipit/cinder:2024.2-ubuntu_jammy\"\n    cinder_volume_usage_audit: \"quay.io/airshipit/cinder:2024.2-ubuntu_jammy\"\n    cinder_db_purge: \"quay.io/airshipit/cinder:2024.2-ubuntu_jammy\"\n    cinder_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n    cinder_backup: \"quay.io/airshipit/cinder:2024.2-ubuntu_jammy\"\n    cinder_backup_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/cinder/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    cinder_db_sync: \"quay.io/airshipit/cinder:2025.1-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    cinder_api: \"quay.io/airshipit/cinder:2025.1-ubuntu_jammy\"\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    cinder_scheduler: \"quay.io/airshipit/cinder:2025.1-ubuntu_jammy\"\n    cinder_volume: \"quay.io/airshipit/cinder:2025.1-ubuntu_jammy\"\n    cinder_volume_usage_audit: \"quay.io/airshipit/cinder:2025.1-ubuntu_jammy\"\n    cinder_db_purge: \"quay.io/airshipit/cinder:2025.1-ubuntu_jammy\"\n    cinder_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n    cinder_backup: \"quay.io/airshipit/cinder:2025.1-ubuntu_jammy\"\n    cinder_backup_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/cinder/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    cinder_db_sync: \"quay.io/airshipit/cinder:2025.1-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    cinder_api: \"quay.io/airshipit/cinder:2025.1-ubuntu_noble\"\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    cinder_scheduler: \"quay.io/airshipit/cinder:2025.1-ubuntu_noble\"\n    cinder_volume: \"quay.io/airshipit/cinder:2025.1-ubuntu_noble\"\n    cinder_volume_usage_audit: \"quay.io/airshipit/cinder:2025.1-ubuntu_noble\"\n    cinder_db_purge: \"quay.io/airshipit/cinder:2025.1-ubuntu_noble\"\n    cinder_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n    cinder_backup: \"quay.io/airshipit/cinder:2025.1-ubuntu_noble\"\n    cinder_backup_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/cinder/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    cinder_db_sync: \"quay.io/airshipit/cinder:2025.2-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    cinder_api: \"quay.io/airshipit/cinder:2025.2-ubuntu_noble\"\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    cinder_scheduler: \"quay.io/airshipit/cinder:2025.2-ubuntu_noble\"\n    cinder_volume: \"quay.io/airshipit/cinder:2025.2-ubuntu_noble\"\n    cinder_volume_usage_audit: \"quay.io/airshipit/cinder:2025.2-ubuntu_noble\"\n    cinder_db_purge: \"quay.io/airshipit/cinder:2025.2-ubuntu_noble\"\n    cinder_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n    cinder_backup: \"quay.io/airshipit/cinder:2025.2-ubuntu_noble\"\n    cinder_backup_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/cinder/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    cinder_api:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      cinder:\n        custom.tld/key: \"value\"\n    rbd:\n      volume_external:\n        custom.tld/key: \"value\"\n    tls:\n      volume_api_public:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/cinder/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    cinder_api:\n      container:\n        cinder_api:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_coordination_volume_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n    cinder_backup:\n      container:\n        cinder_backup:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_coordination_volume_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n    cinder_scheduler:\n      container:\n        cinder_scheduler:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_coordination_volume_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n    cinder_volume:\n      container:\n        cinder_volume:\n          appArmorProfile:\n            type: RuntimeDefault\n        ceph_coordination_volume_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n        init_cinder_conf:\n          appArmorProfile:\n            type: RuntimeDefault\n    storage_init:\n      container:\n        cinder_backup_storage_init:\n          appArmorProfile:\n            type: RuntimeDefault\n    create_internal_tenant:\n      container:\n        create_internal_tenant:\n          appArmorProfile:\n            type: RuntimeDefault\n    volume_usage_audit:\n      container:\n        cinder_volume_usage_audit:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/cinder/backend_pure.yaml",
    "content": "# NOTE: In order for below code to work, package \"purestorage\"\n# needs to be built into Cinder and Nova images.\n\n---\npod:\n  useHostNetwork:\n    volume: true\n    backup: true\n  security_context:\n    cinder_volume:\n      container:\n        cinder_volume:\n          readOnlyRootFilesystem: true\n          privileged: true\n    cinder_backup:\n      container:\n        cinder_backup:\n          privileged: true\nbootstrap:\n  volume_types:\n    # volume type for PURE with multiattach on\n    PURE-MULTIATTACH:\n      multiattach: \"\\\"<is> True\\\"\"\n      volume_backend_name: \"PURE_BE\"\n      access_type: \"private\"\n      grant_access:\n        default:\n          - admin\nconf:\n  cinder:\n    DEFAULT:\n      enabled_backends: \"rbd1,PURE\"\n  backends:\n    PURE:\n      pure_eradicate_on_delete: true\n      volume_backend_name: PURE_BE\n      # NOTE: Replace below pure-api-token-value with the real token value\n      pure_api_token: pure-api-token-value\n      volume_driver: cinder.volume.drivers.pure.PureISCSIDriver\n      use_multipath_for_image_xfer: true\n      # NOTE: Replace below 1.1.1.1 with the real ip value\n      san_ip: 1.1.1.1\n  enable_iscsi: true\n...\n"
  },
  {
    "path": "values_overrides/cinder/external-ceph-backend.yaml",
    "content": "# Note: This yaml file serves as an example for overriding the manifest\n# to enable additional externally managed Ceph Cinder backend.\n# values_overrides/libvirt/cinder-external-ceph-backend.yaml in repo\n# openstack-helm is also needed for the attachment of ceph volumes.\n---\nceph_client:\n  enable_external_ceph_backend: True\n  external_ceph:\n    rbd_user: cinder2\n    rbd_user_keyring: RBDUserKeyRing\n    conf:\n      global:\n        auth client required: none\n        auth cluster required: none\n        auth service required: none\n        cluster network: 172.31.0.128/25\n        fsid: 538fe375-1ee2-4719-a89e-1ff2cd851a1f\n        log_to_syslog: False\n        mon_host: \"[v2:172.31.0.187:3300,v1:172.31.0.187:6789],[v2:172.31.0.188:3300,v1:172.31.0.188:6789],[v2:172.31.0.189:3300,v1:172.31.0.189:6789]\"\n        mon initial members: \"host1,host2,host3\"\n        mon_allow_pool_delete: True\n        mon_compact_on_trim: False\n        mutex_perf_counter: False\n        osd pool default crush rule: -1\n        public network: 172.31.0.128/25\n      osd:\n        ms_dispatch_throttle_bytes: 1048576000\n        objecter_inflight_op_bytes: 1048576000\n        objecter_inflight_ops: 102400\n        osd memory target: 17599882854\n        osd_max_pg_log_entries: 10\n        osd_min_pg_log_entries: 10\n        osd_pg_log_dups_tracked: 10\n        osd_pg_log_trim_min: 10\n\nconf:\n  cinder:\n    DEFAULT:\n      enabled_backends: \"rbd1,rbd2\"\n  backends:\n    rbd2:\n      volume_driver: cinder.volume.drivers.rbd.RBDDriver\n      volume_backend_name: rbd2\n      rbd_pool: cinder2.volumes\n      rbd_ceph_conf: \"/etc/ceph/external-ceph.conf\"\n      rbd_flatten_volume_from_snapshot: False\n      report_discard_supported: True\n      rbd_max_clone_depth: 5\n      rbd_store_chunk_size: 4\n      rados_connect_timeout: -1\n      rbd_user: cinder2\n      rbd_secret_uuid: 3f0133e4-8384-4743-9473-fecacc095c74\n      image_volume_cache_enabled: True\n      image_volume_cache_max_size_gb: 200\n      image_volume_cache_max_count: 50\n...\n"
  },
  {
    "path": "values_overrides/cinder/external-ceph-configmap.yaml",
    "content": "# Note: This yaml file serves as an example for overriding the manifest\n# to enable additional externally managed Ceph Cinder backend.\n# Configuration of external ceph cluster is provided by a pre-existing configmap.\n# For backup external ceph, backup-external-ceph configmap with ceph.conf data field.\n# For 2nd tier external ceph, external-ceph configmap with external-ceph.conf data field.\n---\nbackup:\n  external_ceph_rbd:\n    enabled: true\n    configmap: backup-external-ceph\n\nceph_client:\n  enable_external_ceph_backend: True\n  external_ceph:\n    rbd_user: cinder2\n    rbd_user_keyring: RBDUserKeyRing\n    configmap: external-ceph\n\nconf:\n  cinder:\n    DEFAULT:\n      enabled_backends: \"rbd1,rbd2\"\n  backends:\n    rbd2:\n      volume_driver: cinder.volume.drivers.rbd.RBDDriver\n      volume_backend_name: rbd2\n      rbd_pool: cinder2.volumes\n      rbd_ceph_conf: \"/etc/ceph/external-ceph.conf\"\n      rbd_flatten_volume_from_snapshot: False\n      report_discard_supported: True\n      rbd_max_clone_depth: 5\n      rbd_store_chunk_size: 4\n      rados_connect_timeout: -1\n      rbd_user: cinder2\n      rbd_secret_uuid: 3f0133e4-8384-4743-9473-fecacc095c74\n      image_volume_cache_enabled: True\n      image_volume_cache_max_size_gb: 200\n      image_volume_cache_max_count: 50\n...\n"
  },
  {
    "path": "values_overrides/cinder/gateway.yaml",
    "content": "# Gateway API overrides for Cinder.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  volumev3:\n    host_fqdn_override:\n      public:\n        host: cinder.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: cinder-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.volumev3.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: cinder-api\n              port: 8776\n...\n"
  },
  {
    "path": "values_overrides/cinder/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    cinder_db_sync: \"quay.io/airshipit/cinder:2025.1-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    cinder_api: \"quay.io/airshipit/cinder:2025.1-ubuntu_noble_loci\"\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    cinder_scheduler: \"quay.io/airshipit/cinder:2025.1-ubuntu_noble_loci\"\n    cinder_volume: \"quay.io/airshipit/cinder:2025.1-ubuntu_noble_loci\"\n    cinder_volume_usage_audit: \"quay.io/airshipit/cinder:2025.1-ubuntu_noble_loci\"\n    cinder_db_purge: \"quay.io/airshipit/cinder:2025.1-ubuntu_noble_loci\"\n    cinder_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n    cinder_backup: \"quay.io/airshipit/cinder:2025.1-ubuntu_noble_loci\"\n    cinder_backup_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/cinder/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    cinder_db_sync: \"quay.io/airshipit/cinder:2025.2-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    cinder_api: \"quay.io/airshipit/cinder:2025.2-ubuntu_noble_loci\"\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    cinder_scheduler: \"quay.io/airshipit/cinder:2025.2-ubuntu_noble_loci\"\n    cinder_volume: \"quay.io/airshipit/cinder:2025.2-ubuntu_noble_loci\"\n    cinder_volume_usage_audit: \"quay.io/airshipit/cinder:2025.2-ubuntu_noble_loci\"\n    cinder_db_purge: \"quay.io/airshipit/cinder:2025.2-ubuntu_noble_loci\"\n    cinder_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n    cinder_backup: \"quay.io/airshipit/cinder:2025.2-ubuntu_noble_loci\"\n    cinder_backup_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/cinder/mariadb-operator.yaml",
    "content": "---\nconf:\n  cinder:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  cinder_api:\n    - cinder-db-conn\n  cinder_db_sync:\n    - cinder-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: cinder\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: cinder\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: cinder-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: cinder-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"cinder\"\n      table: \"*\"\n      username: cinder\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: cinder-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: cinder\n      passwordSecretKeyRef:\n        name: cinder-db-password\n        key: password\n      database: cinder\n      secretName: cinder-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/cinder/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\nnetwork_policy:\n  cinder:\n    egress:\n      - to:\n        - ipBlock:\n            cidr: 172.17.0.1/16\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/16\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/32\n        ports:\n          - protocol: TCP\n            port: %%%REPLACE_API_PORT%%%\n...\n"
  },
  {
    "path": "values_overrides/cinder/nfs-cinder-backup.yaml",
    "content": "---\nconf:\n  cinder:\n    DEFAULT:\n      backup_driver: cinder.backup.drivers.nfs.NFSBackupDriver\n      backup_mount_point_base: /backup/openstack/cinder\n      backup_share: 10.30.1.3:/\npod:\n  mounts:\n    cinder_backup:\n      cinder_backup:\n        volumeMounts:\n        - mountPath: /backup\n          name: nfs-backup\n        volumes:\n        - emptyDir: {}\n          name: nfs-backup\n  security_context:\n    cinder_backup:\n      container:\n        cinder_backup:\n          readOnlyRootFilesystem: false\n          runAsGroup: 42424\n          runAsUser: 42424\n...\n"
  },
  {
    "path": "values_overrides/cinder/qos.yaml",
    "content": "# NOTE: In this yaml file, an example qos is created\n# and associated with volume type rbd1\n\n---\nbootstrap:\n  high-iops:\n    consumer: front-end\n    properties:\n      read_iops_sec: 20000\n      write_iops_sec: 10000\n    associates:\n      - rbd1\n...\n"
  },
  {
    "path": "values_overrides/cinder/rabbitmq4.yaml",
    "content": "---\n# Upgrading from rabbitmq 3.x to 4.x requires:\n# 1: upgrading to the latest rabbitmq 3.x release and enabling all feature flags\n# 2: removing all rabbitmq 3.x openstack vhost ha policies\n# 3: setting rabbit_ha_queues to false in all openstack component configs\n# 4: wiping the rabbitmq database if rabbit_ha_queues and/or vhost ha policies were used with 3.x\nconf:\n  cinder:\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: false\n\n# Note: rabbit_ha_queues is true by default for all openstack components in openstack-helm\n\n# Steps to wipe rabbitmq database:\n# 1: rabbitmqctl stop_app\n# 2: rabbitmqctl force_reset\n# 3: rabbitmqctl start_app\n# 4: rerun all openstack component rabbit-init jobs to recreate rabbitmq vhosts and users\n\n# Note: rabbitmq classic v2 vs quorum queues\n# With rabbitmq 4.x classic queues have been replaced with classic v2 queues. Classic v2 queues\n# do not support high availability. For HA, quorum queues must be used. Quorum queues are HA by default.\n# Classic v2 queues are the default in Rabbitmq 4.x.\n#\n# To enable quorum queues with rabbitmq 4.x you can use:\n#\n# conf:\n#   cinder:\n#     oslo_messaging_rabbit:\n#       rabbit_ha_queues: false\n#       rabbit_quorum_queues: true\n#       rabbit_transient_quorum_queue: true\n#       use_queue_manager: true\n...\n"
  },
  {
    "path": "values_overrides/cinder/tls-offloading.yaml",
    "content": "---\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      test:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n\ntls:\n  identity: true\n...\n"
  },
  {
    "path": "values_overrides/cinder/tls.yaml",
    "content": "---\npod:\n  security_context:\n    cinder_api:\n      container:\n        cinder_api:\n          runAsUser: 0\n          readOnlyRootFilesystem: false\nnetwork:\n  api:\n    ingress:\n      annotations:\n        nginx.ingress.kubernetes.io/backend-protocol: \"https\"\nconf:\n  software:\n    apache2:\n      binary: apache2\n      start_parameters: -DFOREGROUND\n      site_dir: /etc/apache2/sites-enabled\n      conf_dir: /etc/apache2/conf-enabled\n      mods_dir: /etc/apache2/mods-available\n      a2enmod:\n        - ssl\n      a2dismod: null\n  mpm_event: |\n    <IfModule mpm_event_module>\n      ServerLimit         1024\n      StartServers        32\n      MinSpareThreads     32\n      MaxSpareThreads     256\n      ThreadsPerChild     25\n      MaxRequestsPerChild 128\n      ThreadLimit         720\n    </IfModule>\n  wsgi_cinder: |\n    {{- $portInt := tuple \"volume\" \"internal\" \"api\" $ | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    Listen {{ $portInt }}\n    <Directory /var/lib/openstack/bin>\n        Require all granted\n    </Directory>\n    <VirtualHost *:{{ $portInt }}>\n      ServerName {{ printf \"%s.%s.svc.%s\" \"cinder-api\" .Release.Namespace .Values.endpoints.cluster_domain_suffix }}\n      WSGIDaemonProcess cinder-api processes=1 threads=1 user=cinder display-name=%{GROUP}\n      WSGIProcessGroup cinder-api\n      WSGIScriptAlias /  /var/lib/openstack/bin/cinder-wsgi\n      WSGIApplicationGroup %{GLOBAL}\n      WSGIPassAuthorization On\n      AllowEncodedSlashes On\n      SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n      ErrorLogFormat \"%{cu}t %M\"\n      ErrorLog /dev/stdout\n      CustomLog /dev/stdout combined env=!forwarded\n      CustomLog /dev/stdout proxy env=forwarded\n\n      SSLEngine on\n      SSLCertificateFile      /etc/cinder/certs/tls.crt\n      SSLCertificateKeyFile   /etc/cinder/certs/tls.key\n      SSLProtocol             all -SSLv3 -TLSv1 -TLSv1.1\n      SSLCipherSuite        
  ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256\n      SSLHonorCipherOrder     on\n    </VirtualHost>\n  cinder:\n    DEFAULT:\n      glance_ca_certificates_file: /etc/cinder/certs/ca.crt\n    keystone_authtoken:\n      cafile: /etc/cinder/certs/ca.crt\n    oslo_messaging_rabbit:\n      ssl: true\n      ssl_ca_file: /etc/rabbitmq/certs/ca.crt\n      ssl_cert_file: /etc/rabbitmq/certs/tls.crt\n      ssl_key_file: /etc/rabbitmq/certs/tls.key\n\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      cinder:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      test:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n    scheme:\n      default: https\n    port:\n      api:\n        default: 443\n  image:\n    scheme:\n      default: https\n    port:\n      api:\n        public: 443\n  image_registry:\n    scheme:\n      default: https\n    port:\n      api:\n        public: 443\n  volume:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: cinder-tls-api\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: https\n      internal: https\n    port:\n      api:\n        public: 443\n  volumev2:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: cinder-tls-api\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: https\n      internal: https\n    port:\n      api:\n        public: 443\n  volumev3:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: cinder-tls-api\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: https\n      internal: 
https\n    port:\n      api:\n        public: 443\n  ingress:\n    port:\n      ingress:\n        default: 443\n  oslo_messaging:\n    port:\n      https:\n        default: 15680\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/cloudkitty/2024.2-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    cloudkitty_api: quay.io/airshipit/cloudkitty:2024.2-ubuntu_jammy\n    cloudkitty_db_sync: quay.io/airshipit/cloudkitty:2024.2-ubuntu_jammy\n    cloudkitty_processor: quay.io/airshipit/cloudkitty:2024.2-ubuntu_jammy\n    cloudkitty_storage_init: quay.io/airshipit/cloudkitty:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/cloudkitty/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    cloudkitty_api: quay.io/airshipit/cloudkitty:2025.1-ubuntu_jammy\n    cloudkitty_db_sync: quay.io/airshipit/cloudkitty:2025.1-ubuntu_jammy\n    cloudkitty_processor: quay.io/airshipit/cloudkitty:2025.1-ubuntu_jammy\n    cloudkitty_storage_init: quay.io/airshipit/cloudkitty:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/cloudkitty/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    cloudkitty_api: quay.io/airshipit/cloudkitty:2025.1-ubuntu_noble\n    cloudkitty_db_sync: quay.io/airshipit/cloudkitty:2025.1-ubuntu_noble\n    cloudkitty_processor: quay.io/airshipit/cloudkitty:2025.1-ubuntu_noble\n    cloudkitty_storage_init: quay.io/airshipit/cloudkitty:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/cloudkitty/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    cloudkitty_api: quay.io/airshipit/cloudkitty:2025.2-ubuntu_noble\n    cloudkitty_db_sync: quay.io/airshipit/cloudkitty:2025.2-ubuntu_noble\n    cloudkitty_processor: quay.io/airshipit/cloudkitty:2025.2-ubuntu_noble\n    cloudkitty_storage_init: quay.io/airshipit/cloudkitty:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/cloudkitty/gateway.yaml",
    "content": "# Gateway API overrides for CloudKitty.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  rating:\n    host_fqdn_override:\n      public:\n        host: cloudkitty.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: cloudkitty-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.rating.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: cloudkitty-api\n              port: 8089\n...\n"
  },
  {
    "path": "values_overrides/cloudkitty/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    cloudkitty_api: quay.io/airshipit/cloudkitty:2025.1-ubuntu_noble_loci\n    cloudkitty_db_sync: quay.io/airshipit/cloudkitty:2025.1-ubuntu_noble_loci\n    cloudkitty_processor: quay.io/airshipit/cloudkitty:2025.1-ubuntu_noble_loci\n    cloudkitty_storage_init: quay.io/airshipit/cloudkitty:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/cloudkitty/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    cloudkitty_api: quay.io/airshipit/cloudkitty:2025.2-ubuntu_noble_loci\n    cloudkitty_db_sync: quay.io/airshipit/cloudkitty:2025.2-ubuntu_noble_loci\n    cloudkitty_processor: quay.io/airshipit/cloudkitty:2025.2-ubuntu_noble_loci\n    cloudkitty_storage_init: quay.io/airshipit/cloudkitty:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/cloudkitty/mariadb-operator.yaml",
    "content": "---\nconf:\n  cloudkitty:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  cloudkitty_api:\n    - cloudkitty-db-conn\n  cloudkitty_db_sync:\n    - cloudkitty-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: cloudkitty\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: cloudkitty\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: cloudkitty-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: cloudkitty-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"cloudkitty\"\n      table: \"*\"\n      username: cloudkitty\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: cloudkitty-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: cloudkitty\n      passwordSecretKeyRef:\n        name: cloudkitty-db-password\n        key: password\n      database: cloudkitty\n      secretName: cloudkitty-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ 
.Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/cyborg/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    cyborg_api:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      cyborg:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/cyborg/gateway.yaml",
    "content": "# Gateway API overrides for Cyborg.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  accelerator:\n    host_fqdn_override:\n      public:\n        host: cyborg.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: cyborg-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.accelerator.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: cyborg-api\n              port: 6666\n...\n"
  },
  {
    "path": "values_overrides/cyborg/mariadb-operator.yaml",
    "content": "---\nconf:\n  cyborg:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  cyborg_api:\n    - cyborg-db-conn\n  cyborg_db_sync:\n    - cyborg-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: cyborg\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: cyborg\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: cyborg-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: cyborg-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"cyborg\"\n      table: \"*\"\n      username: cyborg\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: cyborg-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: cyborg\n      passwordSecretKeyRef:\n        name: cyborg-db-password\n        key: password\n      database: cyborg\n      secretName: cyborg-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n   
     retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/cyborg/rabbitmq4.yaml",
    "content": "---\n# Upgrading from rabbitmq 3.x to 4.x requires:\n# 1: upgrading to the latest rabbitmq 3.x release and enabling all feature flags\n# 2: removing all rabbitmq 3.x openstack vhost ha policies\n# 3: setting rabbit_ha_queues to false in all openstack component configs\n# 4: wiping the rabbitmq database if rabbit_ha_queues and/or vhost ha policies were used with 3.x\nconf:\n  cyborg:\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: false\n\n# Note: rabbit_ha_queues is true by default for all openstack components in openstack-helm\n\n# Steps to wipe rabbitmq database:\n# 1: rabbitmqctl stop_app\n# 2: rabbitmqctl force_reset\n# 3: rabbitmqctl start_app\n# 4: rerun all openstack component rabbit-init jobs to recreate rabbitmq vhosts and users\n\n# Note: rabbitmq classic v2 vs quorum queues\n# With rabbitmq 4.x classic queues have been replaced with classic v2 queues. Classic v2 queues\n# do not support high availability. For HA, quorum queues must be used. Quorum queues are HA by default.\n# Classic v2 queues are the default in Rabbitmq 4.x.\n#\n# To enable quorum queues with rabbitmq 4.x you can use:\n#\n# conf:\n#   cyborg:\n#     oslo_messaging_rabbit:\n#       rabbit_ha_queues: false\n#       rabbit_quorum_queues: true\n#       rabbit_transient_quorum_queue: true\n#       use_queue_manager: true\n...\n"
  },
  {
    "path": "values_overrides/designate/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    designate_api:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      designate:\n        custom.tld/key: \"value\"\n    tls:\n      dns_api_public:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/designate/gateway.yaml",
    "content": "# Gateway API overrides for Designate.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  dns:\n    host_fqdn_override:\n      public:\n        host: designate.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: designate-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.dns.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: designate-api\n              port: 9001\n...\n"
  },
  {
    "path": "values_overrides/designate/mariadb-operator.yaml",
    "content": "---\nconf:\n  designate:\n    database:\n      connection: null\n    storage:sqlalchemy:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  designate_api:\n    - designate-db-conn\n  designate_db_sync:\n    - designate-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: designate\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: designate\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: designate-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: designate-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"designate\"\n      table: \"*\"\n      username: designate\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: designate-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: designate\n      passwordSecretKeyRef:\n        name: designate-db-password\n        key: password\n      database: designate\n      secretName: designate-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ 
.Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/elastic-apm-server/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    elastic_apm_server:\n      container:\n        elastic_apm_server:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/elastic-filebeat/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    filebeat:\n      container:\n        filebeat:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/elasticsearch/2024.2-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    memory_init: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n    helm_tests: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/elasticsearch/2025.1-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    memory_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/elasticsearch/2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    memory_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/elasticsearch/2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    memory_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/elasticsearch/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    master:\n      container:\n        elasticsearch_master:\n          appArmorProfile:\n            type: RuntimeDefault\n        elasticsearch_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n        memory_map_increase:\n          appArmorProfile:\n            type: RuntimeDefault\n    data:\n      container:\n        elasticsearch_data:\n          appArmorProfile:\n            type: RuntimeDefault\n        elasticsearch_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n        memory_map_increase:\n          appArmorProfile:\n            type: RuntimeDefault\n    client:\n      container:\n        elasticsearch_client:\n          appArmorProfile:\n            type: RuntimeDefault\n        memory_map_increase:\n          appArmorProfile:\n            type: RuntimeDefault\n        apache_proxy:\n          appArmorProfile:\n            type: RuntimeDefault\n    exporter:\n      container:\n        elasticsearch_exporter:\n          appArmorProfile:\n            type: RuntimeDefault\n    test:\n      container:\n        helm_tests:\n          appArmorProfile:\n            type: RuntimeDefault\n    create_template:\n      container:\n        create_elasticsearch_template:\n          appArmorProfile:\n            type: RuntimeDefault\n    verify_repositories:\n      container:\n        elasticsearch_verify_repositories:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/elasticsearch/gateway.yaml",
    "content": "# Gateway API overrides for Elasticsearch.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  elasticsearch:\n    host_fqdn_override:\n      public:\n        host: elasticsearch.openstack-helm.org\n\nmanifests:\n  ingress: false\n  service_ingress: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: elasticsearch-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.elasticsearch.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: elasticsearch-logging\n              port: 9200\n...\n"
  },
  {
    "path": "values_overrides/elasticsearch/local-storage.yaml",
    "content": "---\npod:\n  replicas:\n    data: 1\nstorage:\n  data:\n    requests:\n      storage: 1Gi\n    storage_class: local-storage\n  master:\n    requests:\n      storage: 1Gi\n    storage_class: local-storage\nmanifests:\n  cron_curator: false\n  cron_verify_repositories: false\n  job_snapshot_repository: false\n  job_elasticsearch_templates: false\n  job_s3_user: false\n  job_s3_bucket: false\n  helm_tests: false\n...\n"
  },
  {
    "path": "values_overrides/elasticsearch/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    memory_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/elasticsearch/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    memory_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/elasticsearch/remote-cluster.yaml",
    "content": "# Can't use these settings at startup yet becuse of\n# https://github.com/elastic/elasticsearch/issues/27006\n# conf:\n#   elasticsearch:\n#     config:\n#       cluster:\n#         remote:\n#           remote_elasticsearch:\n#             seeds:\n#               - elasticsearch-gateway-1.remote_host:9301\n#               - elasticsearch-gateway-2.remote_host:9301\n#               - elasticsearch-gateway-3.remote_host:9301\n#             skip_unavailale: true\n---\nnetwork:\n  remote_clustering:\n    enabled: true\n\nmanifests:\n  cron_curator: false\n  cron_verify_repositories: false\n  job_snapshot_repository: false\npod:\n  replicas:\n    master: 2\n    data: 1\n    client: 1\n    gateway: 1\n...\n"
  },
  {
    "path": "values_overrides/elasticsearch/tls.yaml",
    "content": "---\nendpoints:\n  elasticsearch:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: elasticsearch-tls-api\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: \"https\"\n    port:\n      http:\n        default: 443\nnetwork:\n  elasticsearch:\n    ingress:\n      annotations:\n        nginx.ingress.kubernetes.io/backend-protocol: https\nconf:\n  httpd: |\n    ServerRoot \"/usr/local/apache2\"\n\n    Listen 443\n\n    LoadModule allowmethods_module modules/mod_allowmethods.so\n    LoadModule mpm_event_module modules/mod_mpm_event.so\n    LoadModule authn_file_module modules/mod_authn_file.so\n    LoadModule authn_core_module modules/mod_authn_core.so\n    LoadModule authz_host_module modules/mod_authz_host.so\n    LoadModule authz_groupfile_module modules/mod_authz_groupfile.so\n    LoadModule authz_user_module modules/mod_authz_user.so\n    LoadModule authz_core_module modules/mod_authz_core.so\n    LoadModule access_compat_module modules/mod_access_compat.so\n    LoadModule auth_basic_module modules/mod_auth_basic.so\n    LoadModule ldap_module modules/mod_ldap.so\n    LoadModule authnz_ldap_module modules/mod_authnz_ldap.so\n    LoadModule reqtimeout_module modules/mod_reqtimeout.so\n    LoadModule filter_module modules/mod_filter.so\n    LoadModule proxy_html_module modules/mod_proxy_html.so\n    LoadModule log_config_module modules/mod_log_config.so\n    LoadModule env_module modules/mod_env.so\n    LoadModule headers_module modules/mod_headers.so\n    LoadModule setenvif_module modules/mod_setenvif.so\n    LoadModule version_module modules/mod_version.so\n    LoadModule proxy_module modules/mod_proxy.so\n    LoadModule proxy_connect_module modules/mod_proxy_connect.so\n    LoadModule proxy_http_module modules/mod_proxy_http.so\n    LoadModule proxy_balancer_module modules/mod_proxy_balancer.so\n    LoadModule slotmem_shm_module modules/mod_slotmem_shm.so\n   
 LoadModule slotmem_plain_module modules/mod_slotmem_plain.so\n    LoadModule unixd_module modules/mod_unixd.so\n    LoadModule status_module modules/mod_status.so\n    LoadModule autoindex_module modules/mod_autoindex.so\n    LoadModule rewrite_module modules/mod_rewrite.so\n    LoadModule ssl_module modules/mod_ssl.so\n\n    <IfModule unixd_module>\n    User daemon\n    Group daemon\n    </IfModule>\n\n    <Directory />\n        AllowOverride none\n        Require all denied\n    </Directory>\n\n    <Files \".ht*\">\n        Require all denied\n    </Files>\n\n    ErrorLog /dev/stderr\n\n    LogLevel warn\n\n    <IfModule log_config_module>\n        LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" combined\n        LogFormat \"%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" proxy\n        LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b\" common\n\n        <IfModule logio_module>\n          LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\" %I %O\" combinedio\n        </IfModule>\n\n        SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n        CustomLog /dev/stdout common\n        CustomLog /dev/stdout combined\n        CustomLog /dev/stdout proxy env=forwarded\n    </IfModule>\n\n    <Directory \"/usr/local/apache2/cgi-bin\">\n        AllowOverride None\n        Options None\n        Require all granted\n    </Directory>\n\n    <IfModule headers_module>\n        RequestHeader unset Proxy early\n    </IfModule>\n\n    <IfModule proxy_html_module>\n    Include conf/extra/proxy-html.conf\n    </IfModule>\n\n    <VirtualHost *:443>\n      <Location />\n          ProxyPass http://localhost:{{ tuple \"elasticsearch\" \"internal\" \"client\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/\n          ProxyPassReverse http://localhost:{{ tuple \"elasticsearch\" \"internal\" \"client\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/\n          AuthName \"Elasticsearch\"\n          AuthType Basic\n          AuthBasicProvider file ldap\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}\n          AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}\n          AuthLDAPURL {{ tuple \"ldap\" \"default\" \"ldap\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | quote }}\n          Require valid-user\n      </Location>\n\n      # Restrict access to the Elasticsearch Update By Query API Endpoint to prevent modification of indexed documents\n      <Location /*/_update_by_query*>\n          Require all denied\n      </Location>\n      # Restrict access to the Elasticsearch Delete By Query API Endpoint to prevent deletion of indexed documents\n      <Location /*/_delete_by_query*>\n          Require all denied\n      </Location>\n      SSLEngine On\n      SSLProxyEngine on\n      SSLCertificateFile      /etc/elasticsearch/certs/tls.crt\n      SSLCertificateKeyFile   /etc/elasticsearch/certs/tls.key\n      SSLProtocol             all -SSLv3 -TLSv1 -TLSv1.1\n      SSLCipherSuite          ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256\n      SSLHonorCipherOrder     on\n    </VirtualHost>\n  elasticsearch:\n    config:\n      xpack:\n        security:\n          enabled: true\n          transport:\n            ssl:\n              enabled: true\n              verification_mode: certificate\n              key: /usr/share/elasticsearch/config/tls.key\n              certificate: /usr/share/elasticsearch/config/tls.crt\n              certificate_authorities: [\"/usr/share/elasticsearch/config/ca.crt\"]\n  curator:\n 
   config:\n      client:\n        use_ssl: True\n        ssl_no_validate: False\n        certificate: '/etc/elasticsearch/certs/ca.crt'\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/fluentd/2024.2-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    helm_tests: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/fluentd/2025.1-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/fluentd/2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/fluentd/2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/fluentd/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    fluentd:\n      container:\n        fluentd:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/fluentd/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/fluentd/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/fluentd/tls.yaml",
    "content": "---\nconf:\n  fluentd:\n    conf:\n      output: |\n        <label @output>\n          <match **>\n            <buffer>\n              chunk_limit_size 512K\n              flush_interval 5s\n              flush_thread_count 8\n              queue_limit_length 32\n              retry_forever false\n              retry_max_interval 30\n            </buffer>\n            host \"#{ENV['ELASTICSEARCH_HOST']}\"\n            reload_connections false\n            reconnect_on_error true\n            reload_on_failure true\n            include_tag_key true\n            logstash_format true\n            password \"#{ENV['ELASTICSEARCH_PASSWORD']}\"\n            port \"#{ENV['ELASTICSEARCH_PORT']}\"\n            scheme \"#{ENV['ELASTICSEARCH_SCHEME']}\"\n            @type elasticsearch\n            user \"#{ENV['ELASTICSEARCH_USERNAME']}\"\n            ssl_verify true\n            ssl_version TLSv1_2\n            ca_file /etc/elasticsearch/certs/ca.crt\n          </match>\n        </label>\nendpoints:\n  elasticsearch:\n    scheme:\n      default: \"https\"\n    port:\n      http:\n        default: 443\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/freezer/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    freezer_db_sync: quay.io/airshipit/freezer-api:2025.1-ubuntu_jammy\n    freezer_api: quay.io/airshipit/freezer-api:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/freezer/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    freezer_db_sync: quay.io/airshipit/freezer-api:2025.1-ubuntu_noble\n    freezer_api: quay.io/airshipit/freezer-api:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/freezer/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    freezer_db_sync: quay.io/airshipit/freezer-api:2025.2-ubuntu_noble\n    freezer_api: quay.io/airshipit/freezer-api:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/freezer/gateway.yaml",
    "content": "# Gateway API overrides for Freezer.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  backup:\n    host_fqdn_override:\n      public:\n        host: freezer.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: freezer-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.backup.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: freezer-api\n              port: 9090\n...\n"
  },
  {
    "path": "values_overrides/freezer/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    freezer_db_sync: quay.io/airshipit/freezer-api:2025.1-ubuntu_noble_loci\n    freezer_api: quay.io/airshipit/freezer-api:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/freezer/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    freezer_db_sync: quay.io/airshipit/freezer-api:2025.2-ubuntu_noble_loci\n    freezer_api: quay.io/airshipit/freezer-api:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/freezer/mariadb-operator.yaml",
    "content": "---\nconf:\n  freezer:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  freezer_api:\n    - freezer-db-conn\n  freezer_db_sync:\n    - freezer-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: freezer\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: freezer\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: freezer-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: freezer-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"freezer\"\n      table: \"*\"\n      username: freezer\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: freezer-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: freezer\n      passwordSecretKeyRef:\n        name: freezer-db-password\n        key: password\n      database: freezer\n      secretName: freezer-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        
interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/glance/2024.2-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    db_init: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_service: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    glance_db_sync: \"quay.io/airshipit/glance:2024.2-ubuntu_jammy\"\n    glance_api: \"quay.io/airshipit/glance:2024.2-ubuntu_jammy\"\n    glance_metadefs_load: \"quay.io/airshipit/glance:2024.2-ubuntu_jammy\"\n    glance_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/glance/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    glance_db_sync: \"quay.io/airshipit/glance:2025.1-ubuntu_jammy\"\n    glance_api: \"quay.io/airshipit/glance:2025.1-ubuntu_jammy\"\n    glance_metadefs_load: \"quay.io/airshipit/glance:2025.1-ubuntu_jammy\"\n    glance_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/glance/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    glance_db_sync: \"quay.io/airshipit/glance:2025.1-ubuntu_noble\"\n    glance_api: \"quay.io/airshipit/glance:2025.1-ubuntu_noble\"\n    glance_metadefs_load: \"quay.io/airshipit/glance:2025.1-ubuntu_noble\"\n    glance_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/glance/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    glance_db_sync: \"quay.io/airshipit/glance:2025.2-ubuntu_noble\"\n    glance_api: \"quay.io/airshipit/glance:2025.2-ubuntu_noble\"\n    glance_metadefs_load: \"quay.io/airshipit/glance:2025.2-ubuntu_noble\"\n    glance_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/glance/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    glance_api:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      glance:\n        custom.tld/key: \"value\"\n    tls:\n      image_api_public:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/glance/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    glance:\n      container:\n        glance_api:\n          appArmorProfile:\n            type: RuntimeDefault\n        glance_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n        nginx:\n          appArmorProfile:\n            type: RuntimeDefault\n    metadefs_load:\n      container:\n        glance_metadefs_load:\n          appArmorProfile:\n            type: RuntimeDefault\n    storage_init:\n      container:\n        glance_storage_init:\n          appArmorProfile:\n            type: RuntimeDefault\n    test:\n      container:\n        glance_test_ks_user:\n          appArmorProfile:\n            type: RuntimeDefault\n        glance_test:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/glance/bootstrap-ubuntu-image.yaml",
    "content": "---\nbootstrap:\n  structured:\n    images:\n      ubuntu_miniaml:\n        name: \"Ubuntu Jammy Minimal\"\n        source_url: \"https://cloud-images.ubuntu.com/minimal/releases/jammy/release/\"\n        image_file: \"ubuntu-22.04-minimal-cloudimg-amd64.img\"\n        id: null\n        min_disk: 3\n        image_type: qcow2\n        container_format: bare\n...\n"
  },
  {
    "path": "values_overrides/glance/gateway.yaml",
    "content": "# Gateway API overrides for Glance.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  image:\n    host_fqdn_override:\n      public:\n        host: glance.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: glance-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.image.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: glance-api\n              port: 9292\n...\n"
  },
  {
    "path": "values_overrides/glance/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    glance_db_sync: \"quay.io/airshipit/glance:2025.1-ubuntu_noble_loci\"\n    glance_api: \"quay.io/airshipit/glance:2025.1-ubuntu_noble_loci\"\n    glance_metadefs_load: \"quay.io/airshipit/glance:2025.1-ubuntu_noble_loci\"\n    glance_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/glance/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    glance_db_sync: \"quay.io/airshipit/glance:2025.2-ubuntu_noble_loci\"\n    glance_api: \"quay.io/airshipit/glance:2025.2-ubuntu_noble_loci\"\n    glance_metadefs_load: \"quay.io/airshipit/glance:2025.2-ubuntu_noble_loci\"\n    glance_storage_init: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/glance/mariadb-operator.yaml",
    "content": "---\nconf:\n  glance:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  glance_api:\n    - glance-db-conn\n  glance_db_sync:\n    - glance-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: glance\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: glance\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: glance-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: glance-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"glance\"\n      table: \"*\"\n      username: glance\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: glance-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: glance\n      passwordSecretKeyRef:\n        name: glance-db-password\n        key: password\n      database: glance\n      secretName: glance-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n   
     retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/glance/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\nnetwork_policy:\n  glance:\n    ingress:\n      - from:\n        - podSelector:\n            matchLabels:\n              application: glance\n        - podSelector:\n            matchLabels:\n              application: nova\n        - podSelector:\n            matchLabels:\n              application: horizon\n        - podSelector:\n            matchLabels:\n              application: ingress\n        - podSelector:\n            matchLabels:\n              application: heat\n        - podSelector:\n            matchLabels:\n              application: ironic\n        - podSelector:\n            matchLabels:\n              application: cinder\n        ports:\n        - protocol: TCP\n          port: 9292\n    egress:\n      - to:\n        ports:\n          - protocol: TCP\n            port: 80\n          - protocol: TCP\n            port: 443\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/32\n        ports:\n          - protocol: TCP\n            port: %%%REPLACE_API_PORT%%%\n...\n"
  },
  {
    "path": "values_overrides/glance/rabbitmq4.yaml",
    "content": "---\n# Upgrading from rabbitmq 3.x to 4.x requires:\n# 1: upgrading to the latest rabbitmq 3.x release and enabling all feature flags\n# 2: removing all rabbitmq 3.x openstack vhost ha policies\n# 3: setting rabbit_ha_queues to false in all openstack component configs\n# 4: wiping the rabbitmq database if rabbit_ha_queues and/or vhost ha policies were used with 3.x\nconf:\n  glance:\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: false\n\n# Note: rabbit_ha_queues is true by default for all openstack components in openstack-helm\n\n# Steps to wipe rabbitmq database:\n# 1: rabbitmqctl stop_app\n# 2: rabbitmqctl force_reset\n# 3: rabbitmqctl start_app\n# 4: rerun all openstack component rabbit-init jobs to recreate rabbitmq vhosts and users\n\n# Note: rabbitmq classic v2 vs quorum queues\n# With rabbitmq 4.x classic queues have been replaced with classic v2 queues. Classic v2 queues\n# do not support high availability. For HA, quorum queues must be used. Quorum queues are HA by default.\n# Classic v2 queues are the default in Rabbitmq 4.x.\n#\n# To enable quorum queues with rabbitmq 4.x you can use:\n#\n# conf:\n#   glance:\n#     oslo_messaging_rabbit:\n#       rabbit_ha_queues: false\n#       rabbit_quorum_queues: true\n#       rabbit_transient_quorum_queue: true\n#       use_queue_manager: true\n...\n"
  },
  {
    "path": "values_overrides/glance/tls-offloading.yaml",
    "content": "---\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      test:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n\ntls:\n  identity: true\n...\n"
  },
  {
    "path": "values_overrides/glance/tls.yaml",
    "content": "---\nimages:\n  tags:\n    nginx: docker.io/nginx:1.18.0\nconf:\n  glance:\n    keystone_authtoken:\n      cafile: /etc/glance/certs/ca.crt\n    glance_store:\n      # This option has been removed in 2024.1\n      https_ca_certificates_file: /etc/glance/certs/ca.crt\n      swift_store_cacert: /etc/glance/certs/ca.crt\n    oslo_messaging_rabbit:\n      ssl: true\n      ssl_ca_file: /etc/rabbitmq/certs/ca.crt\n      ssl_cert_file: /etc/rabbitmq/certs/tls.crt\n      ssl_key_file: /etc/rabbitmq/certs/tls.key\n  glance_api_uwsgi:\n    uwsgi:\n      http-socket: 127.0.0.1:9292\n  nginx: |\n    worker_processes 1;\n    daemon off;\n    user nginx;\n\n    events {\n      worker_connections 1024;\n    }\n\n    http {\n      include /etc/nginx/mime.types;\n      default_type application/octet-stream;\n\n      sendfile on;\n      keepalive_timeout 65s;\n      tcp_nodelay on;\n\n      log_format main '[nginx] method=$request_method path=$request_uri '\n                      'status=$status upstream_status=$upstream_status duration=$request_time size=$body_bytes_sent '\n                      '\"$remote_user\" \"$http_referer\" \"$http_user_agent\"';\n\n      access_log /dev/stdout  main;\n\n      upstream websocket {\n        server 127.0.0.1:$PORT;\n      }\n\n      server {\n        server_name {{ printf \"%s.%s.svc.%s\" \"${SHORTNAME}\" .Release.Namespace .Values.endpoints.cluster_domain_suffix }};\n        listen $POD_IP:$PORT ssl;\n\n        client_max_body_size  0;\n\n        ssl_certificate      /etc/nginx/certs/tls.crt;\n        ssl_certificate_key  /etc/nginx/certs/tls.key;\n        ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384;\n\n        location / {\n          proxy_pass_request_headers on;\n\n          proxy_http_version  1.1;\n          proxy_pass          http://websocket;\n          proxy_read_timeout  90;\n        }\n      }\n    }\nnetwork:\n  api:\n    ingress:\n      annotations:\n        nginx.ingress.kubernetes.io/backend-protocol: \"https\"\n\nendpoints:\n  identity:\n    name: keystone\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      glance:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      test:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n    scheme:\n      default: https\n    port:\n      api:\n        default: 443\n  image:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: glance-tls-api\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: https\n      public: https\n    port:\n      api:\n        public: 443\n  dashboard:\n    scheme:\n      default: https\n      public: https\n    port:\n      web:\n        default: 80\n        public: 443\n  oslo_messaging:\n    port:\n      https:\n        default: 15680\npod:\n  security_context:\n    glance:\n      pod:\n        runAsUser: 0\n  resources:\n    nginx:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/gnocchi/gateway.yaml",
    "content": "# Gateway API overrides for Gnocchi.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  metric:\n    host_fqdn_override:\n      public:\n        host: gnocchi.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: gnocchi-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.metric.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: gnocchi-api\n              port: 8041\n...\n"
  },
  {
    "path": "values_overrides/gnocchi/mariadb-operator.yaml",
    "content": "---\nconf:\n  gnocchi:\n    database:\n      connection: null\n    indexer:\n      url: null\n\nmanifests:\n  job_db_init: false\n  job_db_init_indexer: false\n  secret_db_indexer: false\n\netcSources:\n  gnocchi_api:\n    - gnocchi-db-conn\n  gnocchi_db_sync:\n    - gnocchi-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: gnocchi\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: gnocchi\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: gnocchi-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: gnocchi-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"gnocchi\"\n      table: \"*\"\n      username: gnocchi\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: gnocchi-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: gnocchi\n      passwordSecretKeyRef:\n        name: gnocchi-db-password\n        key: password\n      database: gnocchi\n      secretName: gnocchi-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/grafana/2024.2-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n    grafana_db_session_sync: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/grafana/2025.1-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    grafana_db_session_sync: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/grafana/2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    grafana_db_session_sync: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/grafana/2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    grafana_db_session_sync: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/grafana/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    dashboard:\n      container:\n        grafana:\n          appArmorProfile:\n            type: RuntimeDefault\n    db_init:\n      container:\n        grafana_db_init_session:\n          appArmorProfile:\n            type: RuntimeDefault\n        grafana_db_init:\n          appArmorProfile:\n            type: RuntimeDefault\n    db_session_sync:\n      container:\n        grafana_db_session_sync:\n          appArmorProfile:\n            type: RuntimeDefault\n    set_admin_user:\n      container:\n        grafana_set_admin_password:\n          appArmorProfile:\n            type: RuntimeDefault\n    run_migrator:\n      container:\n        grafana_run_migrator:\n          appArmorProfile:\n            type: RuntimeDefault\n        prepare_grafana_migrator:\n          appArmorProfile:\n            type: RuntimeDefault\n    test:\n      container:\n        helm_tests:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/grafana/calico.yaml",
    "content": "# NOTE(srwilkers): This overrides file provides a reference for a dashboard for\n# the Calico CNI\n---\nconf:\n  dashboards:\n    network:\n      calico: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"prometheus\",\n              \"description\": \"\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"5.0.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.0.0\"\n            }\n          ],\n          \"annotations\": {\n            \"list\": [\n              {\n                \"builtIn\": 1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n                \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              }\n            ]\n          },\n          \"description\": \"Calico cluster monitoring dashboard\",\n          \"overwrite\": true,\n          \"editable\": false,\n          \"gnetId\": 3244,\n          \"graphTooltip\": 0,\n          \"id\": 38,\n          \"links\": [],\n          \"panels\": [\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"true\": 0,\n                \"w\": 24,\n                \"x\": 0,\n                
\"y\": 0\n              },\n              \"id\": 15,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Felix\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"true\": 1,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 1\n              },\n              \"id\": 1,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"felix_active_local_endpoints\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"refId\": \"A\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Active Local Endpoints\",\n    
          \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"true\": 1,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 1\n              },\n              \"id\": 3,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": 
\"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"felix_active_local_policies\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"refId\": \"A\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Active Local Policies\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n     
       {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"true\": 8,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 8\n              },\n              \"id\": 2,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"felix_active_local_selectors\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"refId\": \"A\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Active Local Selectors\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              
\"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"true\": 8,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 8\n              },\n              \"id\": 4,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              
\"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"felix_active_local_tags\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"refId\": \"A\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Active Local Tags\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n           
   \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"true\": 15,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 15\n              },\n              \"id\": 5,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"felix_cluster_num_host_endpoints\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"refId\": \"A\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Cluster Host Endpoints\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n     
         \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"true\": 15,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 15\n              },\n              \"id\": 6,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": false,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": 
\"felix_cluster_num_workload_endpoints\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"refId\": \"A\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Cluster Workload Endpoints\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"true\": 22,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 22\n  
            },\n              \"id\": 7,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"felix_cluster_num_hosts\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"refId\": \"A\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Clusters Hosts\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                 
 \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"true\": 22,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 22\n              },\n              \"id\": 8,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"felix_ipsets_calico\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"refId\": \"A\",\n    
              \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Active IP Sets\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"true\": 29,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 29\n              },\n              \"id\": 9,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": 
true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"felix_iptables_chains\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"refId\": \"A\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Active IP Tables Chains\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": 
null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"true\": 29,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 29\n              },\n              \"id\": 10,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"felix_ipset_errors\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"refId\": \"A\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              
\"title\": \"IP Set Command Failures\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"true\": 36,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 36\n              },\n              \"id\": 11,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              
\"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"felix_iptables_save_errors\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"refId\": \"A\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"IP Tables Save Errors\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              
\"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"true\": 36,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 36\n              },\n              \"id\": 12,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"felix_iptables_restore_errors\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"refId\": \"A\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"IP Tables Restore Errors\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n    
            \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"true\": 43,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 43\n              },\n              \"id\": 13,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"felix_resyncs_started\",\n             
     \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"refId\": \"A\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Felix Resyncing Datastore\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"true\": 43,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 43\n              },\n              \"id\": 14,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": 
true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"felix_int_dataplane_failures\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"refId\": \"A\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Dataplane failed updates\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  
\"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            }\n          ],\n          \"refresh\": \"5m\",\n          \"schemaVersion\": 18,\n          \"style\": \"dark\",\n          \"tags\": [\n            \"calico\"\n          ],\n          \"templating\": {\n            \"list\": [\n              {\n                \"current\": {\n                  \"text\": \"prometheus\",\n                  \"value\": \"prometheus\"\n                },\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Prometheus datasource\",\n                \"multi\": false,\n                \"name\": \"DS_PROMETHEUS\",\n                \"options\": [],\n                \"query\": \"prometheus\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"type\": \"datasource\"\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": \"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n            \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ]\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"Kubernetes Calico\",\n          \"version\": 1\n        }\n...\n"
  },
  {
    "path": "values_overrides/grafana/ceph.yaml",
    "content": "# NOTE(srwilkers): This overrides file provides a reference for dashboards for\n# the overall state of ceph clusters, ceph osds in those clusters, and the\n# status of ceph pools for those clusters\n---\nconf:\n  dashboards:\n    ceph:\n      ceph_cluster: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"prometheus\",\n              \"description\": \"Prometheus.IO\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"3.1.1\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.0.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"singlestat\",\n              \"name\": \"Singlestat\",\n              \"version\": \"\"\n            }\n          ],\n          \"annotations\": {\n            \"list\": [\n              {\n                \"builtIn\": 1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n                \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              }\n            ]\n          },\n          \"description\": \"Ceph Cluster overview.\\r\\n\",\n          \"overwrite\": true,\n          \"editable\": false,\n          \"gnetId\": 917,\n          \"graphTooltip\": 
0,\n          \"id\": 14,\n          \"links\": [],\n          \"panels\": [\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 0\n              },\n              \"id\": 35,\n              \"panels\": [],\n              \"title\": \"New row\",\n              \"type\": \"row\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": true,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 4,\n                \"w\": 4,\n                \"x\": 0,\n                \"y\": 1\n              },\n              \"id\": 21,\n              \"interval\": \"1m\",\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n             
 \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"ceph_health_status{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"1,1\",\n              \"title\": \"Status\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"100%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"HEALTHY\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"WARNING\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"CRITICAL\",\n                  \"value\": \"2\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                
\"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 4,\n                \"w\": 4,\n                \"x\": 4,\n                \"y\": 1\n              },\n              \"id\": 22,\n              \"interval\": \"1m\",\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": 
\"count(ceph_pool_max_avail{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Pools\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"100%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"bytes\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 4,\n                \"w\": 4,\n                \"x\": 8,\n                \"y\": 1\n              },\n              \"id\": 33,\n              \"interval\": \"1m\",\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to 
text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"ceph_cluster_total_bytes{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"0.025,0.1\",\n              \"title\": \"Cluster Capacity\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"100%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 
0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"bytes\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 4,\n                \"w\": 4,\n                \"x\": 12,\n                \"y\": 1\n              },\n              \"id\": 34,\n              \"interval\": \"1m\",\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"ceph_cluster_total_used_bytes{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n            
      \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"0.025,0.1\",\n              \"title\": \"Used Capacity\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"100%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": true,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"percentunit\",\n              \"gauge\": {\n                \"maxValue\": 1,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 4,\n                \"w\": 4,\n                \"x\": 16,\n                \"y\": 1\n              },\n              \"id\": 23,\n              \"interval\": \"1m\",\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n            
  \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"ceph_cluster_total_used_bytes{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}/ceph_cluster_total_bytes{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"0.7,0.8\",\n              \"title\": \"Current Utilization\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"100%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 5\n              },\n              \"id\": 36,\n              \"panels\": [],\n              \"title\": \"New row\",\n              \"type\": \"row\"\n            },\n 
           {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 2,\n                \"x\": 0,\n                \"y\": 6\n              },\n              \"id\": 26,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 
120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum(ceph_osd_in{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"OSDs IN\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 40, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 2,\n                \"x\": 2,\n                \"y\": 6\n              },\n              \"id\": 27,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                
{\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum(ceph_osd_metadata{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}) - sum(ceph_osd_in{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"1,1\",\n              \"title\": \"OSDs OUT\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              
\"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 2,\n                \"x\": 4,\n                \"y\": 6\n              },\n              \"id\": 28,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n  
            \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum(ceph_osd_up{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"OSDs UP\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 40, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 2,\n                \"x\": 6,\n                \"y\": 6\n              },\n              \"id\": 29,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                
  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum(ceph_osd_metadata{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}) - sum(ceph_osd_up{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"1,1\",\n              \"title\": \"OSDs DOWN\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": 
true,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 4,\n                \"x\": 8,\n                \"y\": 6\n              },\n              \"id\": 30,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              
\"targets\": [\n                {\n                  \"expr\": \"avg(ceph_osd_numpg{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"250,300\",\n              \"title\": \"Average PGs per OSD\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 9\n              },\n              \"id\": 37,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"CLUSTER\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {\n                \"Available\": \"#EAB839\",\n                \"Total Capacity\": \"#447EBC\",\n                \"Used\": \"#BF1B00\",\n                \"total_avail\": \"#6ED0E0\",\n                \"total_space\": \"#7EB26D\",\n                \"total_used\": \"#890F02\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 4,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 8,\n                \"w\": 8,\n                \"x\": 0,\n                \"y\": 10\n              },\n           
   \"height\": \"300\",\n              \"id\": 1,\n              \"interval\": \"$interval\",\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 0,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"Total Capacity\",\n                  \"fill\": 0,\n                  \"linewidth\": 3,\n                  \"stack\": false\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"ceph_cluster_total_bytes{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"} - ceph_cluster_total_used_bytes{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Available\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"ceph_cluster_total_used_bytes{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Used\",\n                  \"refId\": \"B\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": 
\"ceph_cluster_total_bytes{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Total Capacity\",\n                  \"refId\": \"C\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Capacity\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {\n                \"Total Capacity\": \"#7EB26D\",\n                \"Used\": \"#BF1B00\",\n                \"total_avail\": \"#6ED0E0\",\n                \"total_space\": \"#7EB26D\",\n                \"total_used\": \"#890F02\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": 
false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 0,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 8,\n                \"w\": 8,\n                \"x\": 8,\n                \"y\": 10\n              },\n              \"height\": \"300\",\n              \"id\": 3,\n              \"interval\": \"$interval\",\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(ceph_osd_op_w{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Write\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"sum(ceph_osd_op_r{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Read\",\n                  \"refId\": 
\"B\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"IOPS\",\n              \"tooltip\": {\n                \"msResolution\": true,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"none\",\n                  \"label\": \"\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 8,\n                \"w\": 8,\n                \"x\": 16,\n                \"y\": 10\n              },\n              \"height\": \"300\",\n              \"id\": 7,\n              \"interval\": \"$interval\",\n              
\"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(ceph_osd_op_in_bytes{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Write\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"sum(ceph_osd_op_out_bytes{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Read\",\n                  \"refId\": \"B\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Throughput\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              
\"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"Bps\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 18\n              },\n              \"id\": 38,\n              \"panels\": [],\n              \"title\": \"New row\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 19\n              },\n              \"id\": 18,\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": false,\n          
      \"min\": false,\n                \"rightSide\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"/^Total.*$/\",\n                  \"stack\": false\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"ceph_cluster_total_objects{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Total\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Objects in the Cluster\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 1,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  
\"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 26\n              },\n              \"id\": 19,\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": false,\n                \"min\": false,\n                \"rightSide\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"/^Total.*$/\",\n                  \"stack\": false\n                }\n              ],\n              \"spaceLength\": 10,\n             
 \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(ceph_osd_numpg{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Total\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"sum(ceph_pg_active{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Active\",\n                  \"refId\": \"B\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"sum(ceph_pg_inconsistent{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Inconsistent\",\n                  \"refId\": \"C\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"sum(ceph_pg_creating{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Creating\",\n                  \"refId\": \"D\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"sum(ceph_pg_recovering{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Recovering\",\n                  \"refId\": \"E\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": 
\"sum(ceph_pg_down{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Down\",\n                  \"refId\": \"F\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"PGs\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 1,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              
\"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 26\n              },\n              \"id\": 20,\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": false,\n                \"min\": false,\n                \"rightSide\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"/^Total.*$/\",\n                  \"stack\": false\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(ceph_pg_degraded{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Degraded\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"sum(ceph_pg_stale{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Stale\",\n                  \"refId\": \"B\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": 
\"sum(ceph_pg_undersized{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Undersized\",\n                  \"refId\": \"C\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Stuck PGs\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 1,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            }\n          ],\n          \"refresh\": \"5m\",\n          \"schemaVersion\": 18,\n          \"style\": \"dark\",\n          \"tags\": [\n            \"ceph\",\n            \"cluster\"\n          ],\n          \"templating\": {\n            \"list\": [\n              {\n                \"current\": {\n                  \"text\": 
\"prometheus\",\n                  \"value\": \"prometheus\"\n                },\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Prometheus datasource\",\n                \"multi\": false,\n                \"name\": \"DS_PROMETHEUS\",\n                \"options\": [],\n                \"query\": \"prometheus\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"type\": \"datasource\"\n              },\n              {\n                \"allValue\": null,\n                \"current\": {},\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"definition\": \"\",\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Cluster\",\n                \"multi\": false,\n                \"name\": \"ceph_cluster\",\n                \"options\": [],\n                \"query\": \"label_values(ceph_health_status, release_group)\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"sort\": 2,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              },\n              {\n                \"auto\": true,\n                \"auto_count\": 10,\n                \"auto_min\": \"1m\",\n                \"current\": {\n                  \"text\": \"1m\",\n                  \"value\": \"1m\"\n                },\n                \"datasource\": null,\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Interval\",\n                \"multi\": false,\n                \"name\": \"interval\",\n                \"options\": [\n                  {\n                    \"selected\": false,\n                    \"text\": \"auto\",\n                    
\"value\": \"$__auto_interval_interval\"\n                  },\n                  {\n                    \"selected\": true,\n                    \"text\": \"1m\",\n                    \"value\": \"1m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"10m\",\n                    \"value\": \"10m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"30m\",\n                    \"value\": \"30m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"1h\",\n                    \"value\": \"1h\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"6h\",\n                    \"value\": \"6h\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"12h\",\n                    \"value\": \"12h\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"1d\",\n                    \"value\": \"1d\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"7d\",\n                    \"value\": \"7d\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"14d\",\n                    \"value\": \"14d\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"30d\",\n                    \"value\": \"30d\"\n                  }\n                ],\n                \"query\": \"1m,10m,30m,1h,6h,12h,1d,7d,14d,30d\",\n                \"refresh\": 2,\n                \"skipUrlSync\": false,\n                \"type\": \"interval\"\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": 
\"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n            \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ]\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"Ceph - Cluster\",\n          \"version\": 1\n        }\n      ceph_osd: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"prometheus\",\n              \"description\": \"Prometheus.IO\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"3.1.1\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.0.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"singlestat\",\n              \"name\": \"Singlestat\",\n              \"version\": \"\"\n            }\n          ],\n          \"annotations\": {\n            \"list\": [\n              {\n                \"builtIn\": 
1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n                \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              }\n            ]\n          },\n          \"description\": \"CEPH OSD Status.\",\n          \"overwrite\": true,\n          \"editable\": true,\n          \"gnetId\": 923,\n          \"graphTooltip\": 0,\n          \"id\": 17,\n          \"links\": [],\n          \"panels\": [\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 0\n              },\n              \"id\": 11,\n              \"panels\": [],\n              \"title\": \"New row\",\n              \"type\": \"row\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 40, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 2,\n                \"x\": 0,\n                \"y\": 1\n              },\n              \"id\": 6,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 2,\n              \"mappingTypes\": [\n               
 {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                },\n                {\n                  \"from\": \"0\",\n                  \"text\": \"DOWN\",\n                  \"to\": \"0.99\"\n                },\n                {\n                  \"from\": \"0.99\",\n                  \"text\": \"UP\",\n                  \"to\": \"1\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"ceph_osd_up{ceph_daemon=\\\"$osd\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"0,1\",\n              \"timeFrom\": null,\n              \"title\": \"Status\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  
\"text\": \"DOWN\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"UP\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 40, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 2,\n                \"x\": 2,\n                \"y\": 1\n              },\n              \"id\": 8,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 2,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n 
             \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                },\n                {\n                  \"from\": \"0\",\n                  \"text\": \"OUT\",\n                  \"to\": \"0.99\"\n                },\n                {\n                  \"from\": \"0.99\",\n                  \"text\": \"IN\",\n                  \"to\": \"1\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"ceph_osd_in{ceph_daemon=\\\"$osd\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"0,1\",\n              \"timeFrom\": null,\n              \"title\": \"Available\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"DOWN\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"UP\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n          
    \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 2,\n                \"x\": 4,\n                \"y\": 1\n              },\n              \"id\": 10,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 2,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n          
      \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"count(ceph_osd_metadata{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"0,1\",\n              \"timeFrom\": null,\n              \"title\": \"Total OSDs\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"DOWN\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"UP\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 4\n              },\n              \"id\": 12,\n              \"panels\": [],\n              \"title\": \"OSD: $osd\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 
20,\n                \"x\": 0,\n                \"y\": 5\n              },\n              \"id\": 5,\n              \"interval\": \"$interval\",\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"/^Average.*/\",\n                  \"fill\": 0,\n                  \"stack\": false\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"ceph_osd_numpg{ceph_daemon=~\\\"$osd\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Number of PGs - {{ $osd }}\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"avg(ceph_osd_numpg{application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Average Number of PGs in the Cluster\",\n                  \"refId\": \"B\",\n                  \"step\": 60\n                }\n              ],\n              
\"thresholds\": [\n                {\n                  \"colorMode\": \"custom\",\n                  \"line\": true,\n                  \"lineColor\": \"rgba(216, 200, 27, 0.27)\",\n                  \"op\": \"gt\",\n                  \"value\": 250\n                },\n                {\n                  \"colorMode\": \"custom\",\n                  \"line\": true,\n                  \"lineColor\": \"rgba(234, 112, 112, 0.22)\",\n                  \"op\": \"gt\",\n                  \"value\": 300\n                }\n              ],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"PGs\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": true,\n              \"colors\": [\n               
 \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"percent\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 4,\n                \"x\": 20,\n                \"y\": 5\n              },\n              \"id\": 7,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  
\"expr\": \"(ceph_osd_stat_bytes_used{ceph_daemon=~\\\"$osd\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}/ceph_osd_stat_bytes{ceph_daemon=~\\\"$osd\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})*100\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"60,80\",\n              \"timeFrom\": null,\n              \"title\": \"Utilization\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 12\n              },\n              \"id\": 13,\n              \"panels\": [],\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 13\n              },\n              \"id\": 2,\n              \"interval\": \"$interval\",\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                
\"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"ceph_osd_stat_bytes_used{ceph_daemon=~\\\"$osd\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Used - {{ osd.$osd }}\",\n                  \"metric\": \"ceph_osd_used_bytes\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"ceph_osd_stat_bytes{ceph_daemon=~\\\"$osd\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"} - ceph_osd_stat_bytes_used{ceph_daemon=~\\\"$osd\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"hide\": false,\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Available - {{ $osd }}\",\n                  \"metric\": \"ceph_osd_avail_bytes\",\n                  \"refId\": \"B\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"OSD Storage\",\n              
\"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 5,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 13\n              },\n              \"id\": 9,\n              \"interval\": \"$interval\",\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                
\"total\": false,\n                \"values\": true\n              },\n              \"lines\": false,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 2,\n              \"points\": true,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"(ceph_osd_stat_bytes_used{ceph_daemon=~\\\"$osd\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}/ceph_osd_stat_bytes{ceph_daemon=~\\\"$osd\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Available - {{ $osd }}\",\n                  \"metric\": \"ceph_osd_avail_bytes\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Utilization Variance\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"none\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  
\"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"none\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            }\n          ],\n          \"refresh\": \"15m\",\n          \"schemaVersion\": 18,\n          \"style\": \"dark\",\n          \"tags\": [\n            \"ceph\",\n            \"osd\"\n          ],\n          \"templating\": {\n            \"list\": [\n              {\n                \"current\": {\n                  \"text\": \"prometheus\",\n                  \"value\": \"prometheus\"\n                },\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Prometheus datasource\",\n                \"multi\": false,\n                \"name\": \"DS_PROMETHEUS\",\n                \"options\": [],\n                \"query\": \"prometheus\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"type\": \"datasource\"\n              },\n              {\n                \"allValue\": null,\n                \"current\": {\n                  \"text\": \"clcp-ucp-ceph-client\",\n                  \"value\": \"clcp-ucp-ceph-client\"\n                },\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"definition\": \"\",\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Cluster\",\n                \"multi\": false,\n                \"name\": \"ceph_cluster\",\n                \"options\": [],\n                \"query\": \"label_values(ceph_health_status, release_group)\",\n                
\"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"sort\": 2,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              },\n              {\n                \"auto\": true,\n                \"auto_count\": 10,\n                \"auto_min\": \"1m\",\n                \"current\": {\n                  \"text\": \"1m\",\n                  \"value\": \"1m\"\n                },\n                \"datasource\": null,\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Interval\",\n                \"multi\": false,\n                \"name\": \"interval\",\n                \"options\": [\n                  {\n                    \"selected\": false,\n                    \"text\": \"auto\",\n                    \"value\": \"$__auto_interval_interval\"\n                  },\n                  {\n                    \"selected\": true,\n                    \"text\": \"1m\",\n                    \"value\": \"1m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"10m\",\n                    \"value\": \"10m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"30m\",\n                    \"value\": \"30m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"1h\",\n                    \"value\": \"1h\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"6h\",\n                    \"value\": \"6h\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"12h\",\n                    \"value\": \"12h\"\n              
    },\n                  {\n                    \"selected\": false,\n                    \"text\": \"1d\",\n                    \"value\": \"1d\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"7d\",\n                    \"value\": \"7d\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"14d\",\n                    \"value\": \"14d\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"30d\",\n                    \"value\": \"30d\"\n                  }\n                ],\n                \"query\": \"1m,10m,30m,1h,6h,12h,1d,7d,14d,30d\",\n                \"refresh\": 2,\n                \"skipUrlSync\": false,\n                \"type\": \"interval\"\n              },\n              {\n                \"allValue\": null,\n                \"current\": {\n                  \"text\": \"osd.0\",\n                  \"value\": \"osd.0\"\n                },\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"definition\": \"\",\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"OSD\",\n                \"multi\": false,\n                \"name\": \"osd\",\n                \"options\": [],\n                \"query\": \"label_values(ceph_osd_metadata{release_group=\\\"$ceph_cluster\\\"}, ceph_daemon)\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"sort\": 0,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": \"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n          
  \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ]\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"Ceph - OSD\",\n          \"version\": 1\n        }\n      ceph_pool: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"prometheus\",\n              \"description\": \"Prometheus.IO\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"3.1.1\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.0.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"singlestat\",\n              \"name\": \"Singlestat\",\n              \"version\": \"\"\n            }\n          ],\n          \"annotations\": {\n            \"list\": [\n              {\n                \"builtIn\": 1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n           
     \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              }\n            ]\n          },\n          \"description\": \"Ceph Pools dashboard.\",\n          \"overwrite\": true,\n          \"editable\": false,\n          \"gnetId\": 926,\n          \"graphTooltip\": 0,\n          \"id\": 2,\n          \"links\": [],\n          \"panels\": [\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 0\n              },\n              \"id\": 11,\n              \"panels\": [],\n              \"title\": \"Pool: $pool\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 4,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 20,\n                \"x\": 0,\n                \"y\": 1\n              },\n              \"height\": \"\",\n              \"id\": 2,\n              \"interval\": \"$interval\",\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 0,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n             
 \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"/^Total.*$/\",\n                  \"fill\": 0,\n                  \"linewidth\": 4,\n                  \"stack\": false\n                },\n                {\n                  \"alias\": \"/^Raw.*$/\",\n                  \"color\": \"#BF1B00\",\n                  \"fill\": 0,\n                  \"linewidth\": 4\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"ceph_pool_max_avail{pool_id=~\\\"$pool\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Total - {{ $pool }}\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"ceph_pool_stored{pool_id=~\\\"$pool\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Used - {{ $pool }}\",\n                  \"refId\": \"B\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"ceph_pool_max_avail{pool_id=~\\\"$pool\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"} - ceph_pool_stored{pool_id=~\\\"$pool\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Available - {{ $pool }}\",\n                  \"refId\": \"C\",\n        
          \"step\": 60\n                },\n                {\n                  \"expr\": \"ceph_pool_raw_bytes_used{pool_id=~\\\"$pool\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Raw - {{ $pool }}\",\n                  \"refId\": \"D\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"[[pool_name]] Pool Storage\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": true,\n              \"colors\": [\n                \"rgba(245, 54, 54, 
0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"percentunit\",\n              \"gauge\": {\n                \"maxValue\": 1,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 4,\n                \"x\": 20,\n                \"y\": 1\n              },\n              \"id\": 10,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n          
        \"expr\": \"(ceph_pool_stored{pool_id=~\\\"$pool\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"} / ceph_pool_max_avail{pool_id=~\\\"$pool\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"})\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"[[pool_name]] Pool Usage\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 8\n              },\n              \"id\": 12,\n              \"panels\": [],\n              \"title\": \"New row\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 9\n              },\n              \"height\": \"\",\n              \"id\": 7,\n              \"isNew\": true,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                
\"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"ceph_pool_objects{pool_id=~\\\"$pool\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Objects - {{ $pool_name }}\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"ceph_pool_dirty{pool_id=~\\\"$pool\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Dirty Objects - {{ $pool_name }}\",\n                  \"refId\": \"B\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Objects in Pool [[pool_name]]\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": 
\"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 9\n              },\n              \"id\": 4,\n              \"interval\": \"$interval\",\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 
5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"irate(ceph_pool_rd{pool_id=~\\\"$pool\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}[3m])\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Read - {{ $pool_name }}\",\n                  \"refId\": \"B\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"irate(ceph_pool_wr{pool_id=~\\\"$pool\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}[3m])\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Write - {{ $pool_name }}\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"[[pool_name]] Pool IOPS\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"none\",\n                  \"label\": \"IOPS\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": 
true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": \"IOPS\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 16\n              },\n              \"id\": 5,\n              \"interval\": \"$interval\",\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": 
\"irate(ceph_pool_rd_bytes{pool_id=\\\"$pool\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}[3m])\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Read Bytes - {{ $pool_name }}\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"irate(ceph_pool_wr_bytes{pool_id=\\\"$pool\\\",application=\\\"ceph\\\",release_group=\\\"$ceph_cluster\\\"}[3m])\",\n                  \"interval\": \"$interval\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Written Bytes - {{ $pool_name }}\",\n                  \"refId\": \"B\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"[[pool_name]] Pool Throughput\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"Bps\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"Bps\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n       
       ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            }\n          ],\n          \"refresh\": \"5m\",\n          \"schemaVersion\": 18,\n          \"style\": \"dark\",\n          \"tags\": [\n            \"ceph\",\n            \"pools\"\n          ],\n          \"templating\": {\n            \"list\": [\n              {\n                \"current\": {\n                  \"text\": \"prometheus\",\n                  \"value\": \"prometheus\"\n                },\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Prometheus datasource\",\n                \"multi\": false,\n                \"name\": \"DS_PROMETHEUS\",\n                \"options\": [],\n                \"query\": \"prometheus\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"type\": \"datasource\"\n              },\n              {\n                \"allValue\": null,\n                \"current\": {\n                  \"text\": \"clcp-ucp-ceph-client\",\n                  \"value\": \"clcp-ucp-ceph-client\"\n                },\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"definition\": \"\",\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Cluster\",\n                \"multi\": false,\n                \"name\": \"ceph_cluster\",\n                \"options\": [],\n                \"query\": \"label_values(ceph_health_status, release_group)\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"sort\": 2,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              },\n              {\n                
\"auto\": true,\n                \"auto_count\": 10,\n                \"auto_min\": \"1m\",\n                \"current\": {\n                  \"text\": \"1m\",\n                  \"value\": \"1m\"\n                },\n                \"datasource\": null,\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Interval\",\n                \"multi\": false,\n                \"name\": \"interval\",\n                \"options\": [\n                  {\n                    \"selected\": false,\n                    \"text\": \"auto\",\n                    \"value\": \"$__auto_interval_interval\"\n                  },\n                  {\n                    \"selected\": true,\n                    \"text\": \"1m\",\n                    \"value\": \"1m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"10m\",\n                    \"value\": \"10m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"30m\",\n                    \"value\": \"30m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"1h\",\n                    \"value\": \"1h\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"6h\",\n                    \"value\": \"6h\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"12h\",\n                    \"value\": \"12h\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"1d\",\n                    \"value\": \"1d\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"7d\",\n                    \"value\": \"7d\"\n                  },\n                  {\n    
                \"selected\": false,\n                    \"text\": \"14d\",\n                    \"value\": \"14d\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"30d\",\n                    \"value\": \"30d\"\n                  }\n                ],\n                \"query\": \"1m,10m,30m,1h,6h,12h,1d,7d,14d,30d\",\n                \"refresh\": 2,\n                \"skipUrlSync\": false,\n                \"type\": \"interval\"\n              },\n              {\n                \"allValue\": null,\n                \"current\": {\n                  \"text\": \"1\",\n                  \"value\": \"1\"\n                },\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"definition\": \"\",\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Pool\",\n                \"multi\": false,\n                \"name\": \"pool\",\n                \"options\": [],\n                \"query\": \"label_values(ceph_pool_objects{release_group=\\\"$ceph_cluster\\\"}, pool_id)\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"sort\": 0,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              },\n              {\n                \"allValue\": null,\n                \"current\": {\n                  \"text\": \"rbd\",\n                  \"value\": \"rbd\"\n                },\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"definition\": \"\",\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Pool\",\n                \"multi\": false,\n                \"name\": \"pool_name\",\n                \"options\": [],\n                \"query\": 
\"label_values(ceph_pool_metadata{release_group=\\\"$ceph_cluster\\\",pool_id=\\\"[[pool]]\\\" }, name)\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"sort\": 0,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": \"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n            \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ]\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"Ceph - Pools\",\n          \"version\": 1\n        }\n...\n"
  },
  {
    "path": "values_overrides/grafana/containers.yaml",
    "content": "# NOTE(srwilkers): This overrides file provides a reference for a dashboard for\n# container metrics, specific to each host\n---\nconf:\n  dashboards:\n    kubernetes:\n      containers: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"prometheus\",\n              \"description\": \"\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"3.1.1\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.3.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"singlestat\",\n              \"name\": \"Singlestat\",\n              \"version\": \"\"\n            }\n          ],\n          \"annotations\": {\n            \"list\": [\n              {\n                \"builtIn\": 1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n                \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              }\n            ]\n          },\n          \"description\": \"Monitors Kubernetes cluster using Prometheus. Shows overall cluster CPU / Memory / Filesystem usage as well as individual pod, containers, systemd services statistics. 
Uses cAdvisor metrics only.\",\n          \"overwrite\": true,\n          \"editable\": false,\n          \"gnetId\": 315,\n          \"graphTooltip\": 0,\n          \"id\": 32,\n          \"links\": [],\n          \"panels\": [\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 0\n              },\n              \"id\": 33,\n              \"panels\": [],\n              \"title\": \"Network I/O pressure\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 5,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 1\n              },\n              \"height\": \"200px\",\n              \"id\": 32,\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": false,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": false,\n                \"min\": false,\n                \"rightSide\": false,\n                \"show\": false,\n                \"sideWidth\": 200,\n                \"sort\": \"current\",\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              
\"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum (rate (container_network_receive_bytes_total{kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m]))\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Received\",\n                  \"metric\": \"network\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                },\n                {\n                  \"expr\": \"- sum (rate (container_network_transmit_bytes_total{kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m]))\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Sent\",\n                  \"metric\": \"network\",\n                  \"refId\": \"B\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Network I/O pressure\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"Bps\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                
{\n                  \"format\": \"Bps\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 6\n              },\n              \"id\": 34,\n              \"panels\": [],\n              \"title\": \"Total usage\",\n              \"type\": \"row\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": true,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"percent\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 5,\n                \"w\": 8,\n                \"x\": 0,\n                \"y\": 7\n              },\n              \"height\": \"180px\",\n              \"id\": 4,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n             
     \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum (container_memory_working_set_bytes{id=\\\"/\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}) / sum (machine_memory_bytes{kubernetes_io_hostname=~\\\"^$Node$\\\"}) * 100\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": \"65, 90\",\n              \"title\": \"Cluster memory usage\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": true,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                
\"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"percent\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 5,\n                \"w\": 8,\n                \"x\": 8,\n                \"y\": 7\n              },\n              \"height\": \"180px\",\n              \"id\": 6,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n  
                \"expr\": \"sum (rate (container_cpu_usage_seconds_total{id=\\\"/\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\\\"^$Node$\\\"}) * 100\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": \"65, 90\",\n              \"title\": \"Cluster CPU usage (5m avg)\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": true,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"percent\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 5,\n                \"w\": 8,\n                \"x\": 16,\n                \"y\": 7\n              },\n              \"height\": \"180px\",\n              \"id\": 7,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n   
               \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum (container_fs_usage_bytes{device=~\\\"^/dev/[sv]da[0-9]$\\\",id=~\\\"/.+\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}) / sum (container_fs_limit_bytes{device=~\\\"^/dev/[sv]da[0-9]$\\\",id=~\\\"/.+\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}) * 100\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": \"65, 90\",\n              \"title\": \"Cluster filesystem usage\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n            
  ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"bytes\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 4,\n                \"x\": 0,\n                \"y\": 12\n              },\n              \"height\": \"1px\",\n              \"id\": 9,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"20%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"20%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n   
             \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum (container_memory_working_set_bytes{id=\\\"/\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"})\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Used\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"bytes\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 4,\n                \"x\": 4,\n                \"y\": 12\n              },\n              \"height\": \"1px\",\n              \"id\": 10,\n             
 \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum (machine_memory_bytes{kubernetes_io_hostname=~\\\"^$Node$\\\"})\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Total\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": 
null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 4,\n                \"x\": 8,\n                \"y\": 12\n              },\n              \"height\": \"1px\",\n              \"id\": 11,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \" cores\",\n              \"postfixFontSize\": \"30%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n            
    \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum (rate (container_cpu_usage_seconds_total{id=\\\"/\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m]))\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Used\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 4,\n                \"x\": 12,\n                \"y\": 12\n              },\n              \"height\": \"1px\",\n              \"id\": 12,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n           
   \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \" cores\",\n              \"postfixFontSize\": \"30%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum (machine_cpu_cores{kubernetes_io_hostname=~\\\"^$Node$\\\"})\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Total\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n        
      \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"bytes\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 4,\n                \"x\": 16,\n                \"y\": 12\n              },\n              \"height\": \"1px\",\n              \"id\": 13,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n      
        \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum (container_fs_usage_bytes{device=~\\\"^/dev/[sv]da[0-9]$\\\",id=~\\\"/.+\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"})\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Used\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"bytes\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 4,\n                \"x\": 20,\n                \"y\": 12\n              },\n              \"height\": \"1px\",\n              \"id\": 14,\n              \"interval\": null,\n              \"isNew\": true,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n   
               \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum (container_fs_limit_bytes{device=~\\\"^/dev/[sv]da[0-9]$\\\",id=~\\\"/.+\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"})\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Total\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n        
        \"y\": 15\n              },\n              \"id\": 35,\n              \"panels\": [],\n              \"title\": \"Pods CPU usage\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 3,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 0,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 16\n              },\n              \"height\": \"\",\n              \"id\": 17,\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": false,\n                \"min\": false,\n                \"rightSide\": true,\n                \"show\": true,\n                \"sort\": \"current\",\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": true,\n              \"targets\": [\n                {\n                  \"expr\": \"sum (rate (container_cpu_usage_seconds_total{image!=\\\"\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (pod)\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n       
           \"legendFormat\": \"{{ pod }}\",\n                  \"metric\": \"container_cpu\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Pods CPU usage (5m avg)\",\n              \"tooltip\": {\n                \"msResolution\": true,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"none\",\n                  \"label\": \"cores\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"collapsed\": true,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 23\n              },\n              \"id\": 36,\n              \"panels\": [\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"decimals\": 3,\n   
               \"editable\": true,\n                  \"error\": false,\n                  \"fill\": 0,\n                  \"grid\": {},\n                  \"gridPos\": {\n                    \"h\": 7,\n                    \"w\": 24,\n                    \"x\": 0,\n                    \"y\": 23\n                  },\n                  \"height\": \"\",\n                  \"id\": 24,\n                  \"isNew\": true,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                    \"avg\": true,\n                    \"current\": true,\n                    \"hideEmpty\": false,\n                    \"hideZero\": false,\n                    \"max\": false,\n                    \"min\": false,\n                    \"rightSide\": true,\n                    \"show\": true,\n                    \"sideWidth\": null,\n                    \"sort\": \"current\",\n                    \"sortDesc\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 2,\n                  \"links\": [],\n                  \"nullPointMode\": \"connected\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"stack\": false,\n                  \"steppedLine\": true,\n                  \"targets\": [\n                    {\n                      \"expr\": \"sum (rate (container_cpu_usage_seconds_total{image!=\\\"\\\",name=~\\\"^k8s_.*\\\",container!=\\\"POD\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (container, pod)\",\n                      \"hide\": false,\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"pod: {{ pod }} | {{ container }}\",\n                      \"metric\": 
\"container_cpu\",\n                      \"refId\": \"A\",\n                      \"step\": 10\n                    },\n                    {\n                      \"expr\": \"sum (rate (container_cpu_usage_seconds_total{image!=\\\"\\\",name!~\\\"^k8s_.*\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (kubernetes_io_hostname, name, image)\",\n                      \"hide\": false,\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})\",\n                      \"metric\": \"container_cpu\",\n                      \"refId\": \"B\",\n                      \"step\": 10\n                    },\n                    {\n                      \"expr\": \"sum (rate (container_cpu_usage_seconds_total{rkt_container_name!=\\\"\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (kubernetes_io_hostname, rkt_container_name)\",\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}\",\n                      \"metric\": \"container_cpu\",\n                      \"refId\": \"C\",\n                      \"step\": 10\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"Containers CPU usage (5m avg)\",\n                  \"tooltip\": {\n                    \"msResolution\": true,\n                    \"shared\": true,\n                    \"sort\": 2,\n                    \"value_type\": \"cumulative\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"show\": true\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"none\",\n      
                \"label\": \"cores\",\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": false\n                    }\n                  ]\n                }\n              ],\n              \"title\": \"Containers CPU usage\",\n              \"type\": \"row\"\n            },\n            {\n              \"collapsed\": true,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 24\n              },\n              \"id\": 37,\n              \"panels\": [\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"decimals\": 3,\n                  \"editable\": true,\n                  \"error\": false,\n                  \"fill\": 0,\n                  \"grid\": {},\n                  \"gridPos\": {\n                    \"h\": 13,\n                    \"w\": 24,\n                    \"x\": 0,\n                    \"y\": 24\n                  },\n                  \"id\": 20,\n                  \"isNew\": true,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                    \"avg\": true,\n                    \"current\": true,\n                    \"max\": false,\n                    \"min\": false,\n                    \"rightSide\": false,\n                    \"show\": true,\n                    \"sort\": \"current\",\n                    \"sortDesc\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n     
             \"lines\": true,\n                  \"linewidth\": 2,\n                  \"links\": [],\n                  \"nullPointMode\": \"connected\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"stack\": false,\n                  \"steppedLine\": true,\n                  \"targets\": [\n                    {\n                      \"expr\": \"sum (rate (container_cpu_usage_seconds_total{id!=\\\"/\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (id)\",\n                      \"hide\": false,\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"{{ id }}\",\n                      \"metric\": \"container_cpu\",\n                      \"refId\": \"A\",\n                      \"step\": 10\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"All processes CPU usage (5m avg)\",\n                  \"tooltip\": {\n                    \"msResolution\": true,\n                    \"shared\": true,\n                    \"sort\": 2,\n                    \"value_type\": \"cumulative\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"show\": true\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"none\",\n                      \"label\": \"cores\",\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n               
       \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": false\n                    }\n                  ]\n                }\n              ],\n              \"repeat\": null,\n              \"title\": \"All processes CPU usage\",\n              \"type\": \"row\"\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 25\n              },\n              \"id\": 38,\n              \"panels\": [],\n              \"title\": \"Pods memory usage\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 0,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 26\n              },\n              \"id\": 25,\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": false,\n                \"min\": false,\n                \"rightSide\": true,\n                \"show\": true,\n                \"sideWidth\": 200,\n                \"sort\": \"current\",\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n          
    \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": true,\n              \"targets\": [\n                {\n                  \"expr\": \"sum (container_memory_working_set_bytes{image!=\\\"\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}) by (pod)\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"{{ pod }}\",\n                  \"metric\": \"container_memory_usage:sort_desc\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Pods memory usage\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                
\"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"collapsed\": true,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 33\n              },\n              \"id\": 39,\n              \"panels\": [\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"decimals\": 2,\n                  \"editable\": true,\n                  \"error\": false,\n                  \"fill\": 0,\n                  \"grid\": {},\n                  \"gridPos\": {\n                    \"h\": 7,\n                    \"w\": 24,\n                    \"x\": 0,\n                    \"y\": 33\n                  },\n                  \"id\": 27,\n                  \"isNew\": true,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                    \"avg\": true,\n                    \"current\": true,\n                    \"max\": false,\n                    \"min\": false,\n                    \"rightSide\": true,\n                    \"show\": true,\n                    \"sideWidth\": 200,\n                    \"sort\": \"current\",\n                    \"sortDesc\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 2,\n                  \"links\": [],\n                  \"nullPointMode\": \"connected\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"stack\": false,\n                  \"steppedLine\": true,\n                  \"targets\": [\n                    {\n                      \"expr\": 
\"sum (container_memory_working_set_bytes{image!=\\\"\\\",name=~\\\"^k8s_.*\\\",container!=\\\"POD\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}) by (container, pod)\",\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"pod: {{ pod }} | {{ container }}\",\n                      \"metric\": \"container_memory_usage:sort_desc\",\n                      \"refId\": \"A\",\n                      \"step\": 10\n                    },\n                    {\n                      \"expr\": \"sum (container_memory_working_set_bytes{image!=\\\"\\\",name!~\\\"^k8s_.*\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}) by (kubernetes_io_hostname, name, image)\",\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})\",\n                      \"metric\": \"container_memory_usage:sort_desc\",\n                      \"refId\": \"B\",\n                      \"step\": 10\n                    },\n                    {\n                      \"expr\": \"sum (container_memory_working_set_bytes{rkt_container_name!=\\\"\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}) by (kubernetes_io_hostname, rkt_container_name)\",\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}\",\n                      \"metric\": \"container_memory_usage:sort_desc\",\n                      \"refId\": \"C\",\n                      \"step\": 10\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"Containers memory usage\",\n                  \"tooltip\": {\n                    \"msResolution\": false,\n                 
   \"shared\": true,\n                    \"sort\": 2,\n                    \"value_type\": \"cumulative\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"show\": true\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"bytes\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": false\n                    }\n                  ]\n                }\n              ],\n              \"title\": \"Containers memory usage\",\n              \"type\": \"row\"\n            },\n            {\n              \"collapsed\": true,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 34\n              },\n              \"id\": 40,\n              \"panels\": [\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"decimals\": 2,\n                  \"editable\": true,\n                  \"error\": false,\n                  \"fill\": 0,\n                  \"grid\": {},\n                  \"gridPos\": {\n                    \"h\": 13,\n                    \"w\": 24,\n                    \"x\": 0,\n                    \"y\": 34\n                  },\n                  \"id\": 28,\n                  \"isNew\": true,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                    \"avg\": true,\n                  
  \"current\": true,\n                    \"max\": false,\n                    \"min\": false,\n                    \"rightSide\": false,\n                    \"show\": true,\n                    \"sideWidth\": 200,\n                    \"sort\": \"current\",\n                    \"sortDesc\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 2,\n                  \"links\": [],\n                  \"nullPointMode\": \"connected\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"stack\": false,\n                  \"steppedLine\": true,\n                  \"targets\": [\n                    {\n                      \"expr\": \"sum (container_memory_working_set_bytes{id!=\\\"/\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}) by (id)\",\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"{{ id }}\",\n                      \"metric\": \"container_memory_usage:sort_desc\",\n                      \"refId\": \"A\",\n                      \"step\": 10\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"All processes memory usage\",\n                  \"tooltip\": {\n                    \"msResolution\": false,\n                    \"shared\": true,\n                    \"sort\": 2,\n                    \"value_type\": \"cumulative\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"show\": true\n                  },\n                  \"yaxes\": [\n                    {\n                     
 \"format\": \"bytes\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": false\n                    }\n                  ]\n                }\n              ],\n              \"title\": \"All processes memory usage\",\n              \"type\": \"row\"\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 35\n              },\n              \"id\": 41,\n              \"panels\": [],\n              \"title\": \"Pods network I/O\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 36\n              },\n              \"id\": 16,\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": false,\n                \"min\": false,\n                \"rightSide\": true,\n                \"show\": true,\n                \"sideWidth\": 200,\n                \"sort\": \"current\",\n                \"sortDesc\": true,\n                \"total\": false,\n          
      \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum (rate (container_network_receive_bytes_total{image!=\\\"\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (pod)\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"-> {{ pod }}\",\n                  \"metric\": \"network\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                },\n                {\n                  \"expr\": \"- sum (rate (container_network_transmit_bytes_total{image!=\\\"\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (pod)\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"<- {{ pod }}\",\n                  \"metric\": \"network\",\n                  \"refId\": \"B\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Pods network I/O (5m avg)\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"show\": true\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"Bps\",\n               
   \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ]\n            },\n            {\n              \"collapsed\": true,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 43\n              },\n              \"id\": 42,\n              \"panels\": [\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"decimals\": 2,\n                  \"editable\": true,\n                  \"error\": false,\n                  \"fill\": 1,\n                  \"grid\": {},\n                  \"gridPos\": {\n                    \"h\": 7,\n                    \"w\": 24,\n                    \"x\": 0,\n                    \"y\": 43\n                  },\n                  \"id\": 30,\n                  \"isNew\": true,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                    \"avg\": true,\n                    \"current\": true,\n                    \"max\": false,\n                    \"min\": false,\n                    \"rightSide\": true,\n                    \"show\": true,\n                    \"sideWidth\": 200,\n                    \"sort\": \"current\",\n                    \"sortDesc\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 2,\n                  \"links\": [],\n                  \"nullPointMode\": \"connected\",\n  
                \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"stack\": false,\n                  \"steppedLine\": false,\n                  \"targets\": [\n                    {\n                      \"expr\": \"sum (rate (container_network_receive_bytes_total{image!=\\\"\\\",name=~\\\"^k8s_.*\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (container, pod)\",\n                      \"hide\": false,\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"-> pod: {{ pod }} | {{ container }}\",\n                      \"metric\": \"network\",\n                      \"refId\": \"B\",\n                      \"step\": 10\n                    },\n                    {\n                      \"expr\": \"- sum (rate (container_network_transmit_bytes_total{image!=\\\"\\\",name=~\\\"^k8s_.*\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (container, pod)\",\n                      \"hide\": false,\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"<- pod: {{ pod }} | {{ container }}\",\n                      \"metric\": \"network\",\n                      \"refId\": \"D\",\n                      \"step\": 10\n                    },\n                    {\n                      \"expr\": \"sum (rate (container_network_receive_bytes_total{image!=\\\"\\\",name!~\\\"^k8s_.*\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (kubernetes_io_hostname, name, image)\",\n                      \"hide\": false,\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"-> docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})\",\n                      \"metric\": 
\"network\",\n                      \"refId\": \"A\",\n                      \"step\": 10\n                    },\n                    {\n                      \"expr\": \"- sum (rate (container_network_transmit_bytes_total{image!=\\\"\\\",name!~\\\"^k8s_.*\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (kubernetes_io_hostname, name, image)\",\n                      \"hide\": false,\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"<- docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})\",\n                      \"metric\": \"network\",\n                      \"refId\": \"C\",\n                      \"step\": 10\n                    },\n                    {\n                      \"expr\": \"sum (rate (container_network_receive_bytes_total{rkt_container_name!=\\\"\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (kubernetes_io_hostname, rkt_container_name)\",\n                      \"hide\": false,\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"-> rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}\",\n                      \"metric\": \"network\",\n                      \"refId\": \"E\",\n                      \"step\": 10\n                    },\n                    {\n                      \"expr\": \"- sum (rate (container_network_transmit_bytes_total{rkt_container_name!=\\\"\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (kubernetes_io_hostname, rkt_container_name)\",\n                      \"hide\": false,\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"<- rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}\",\n                      \"metric\": \"network\",\n                      \"refId\": \"F\",\n                      \"step\": 10\n                    
}\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"Containers network I/O (5m avg)\",\n                  \"tooltip\": {\n                    \"msResolution\": false,\n                    \"shared\": true,\n                    \"sort\": 2,\n                    \"value_type\": \"cumulative\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"show\": true\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"Bps\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": false\n                    }\n                  ]\n                }\n              ],\n              \"title\": \"Containers network I/O\",\n              \"type\": \"row\"\n            },\n            {\n              \"collapsed\": true,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 44\n              },\n              \"id\": 43,\n              \"panels\": [\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"decimals\": 2,\n                  \"editable\": true,\n                  \"error\": false,\n                  \"fill\": 1,\n                  \"grid\": {},\n                  \"gridPos\": {\n                    \"h\": 13,\n           
         \"w\": 24,\n                    \"x\": 0,\n                    \"y\": 44\n                  },\n                  \"id\": 29,\n                  \"isNew\": true,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                    \"avg\": true,\n                    \"current\": true,\n                    \"max\": false,\n                    \"min\": false,\n                    \"rightSide\": false,\n                    \"show\": true,\n                    \"sideWidth\": 200,\n                    \"sort\": \"current\",\n                    \"sortDesc\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 2,\n                  \"links\": [],\n                  \"nullPointMode\": \"connected\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"stack\": false,\n                  \"steppedLine\": false,\n                  \"targets\": [\n                    {\n                      \"expr\": \"sum (rate (container_network_receive_bytes_total{id!=\\\"/\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (id)\",\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"-> {{ id }}\",\n                      \"metric\": \"network\",\n                      \"refId\": \"A\",\n                      \"step\": 10\n                    },\n                    {\n                      \"expr\": \"- sum (rate (container_network_transmit_bytes_total{id!=\\\"/\\\",kubernetes_io_hostname=~\\\"^$Node$\\\"}[5m])) by (id)\",\n                      \"interval\": \"10s\",\n                      \"intervalFactor\": 1,\n                      \"legendFormat\": \"<- {{ id }}\",\n     
                 \"metric\": \"network\",\n                      \"refId\": \"B\",\n                      \"step\": 10\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"All processes network I/O (5m avg)\",\n                  \"tooltip\": {\n                    \"msResolution\": false,\n                    \"shared\": true,\n                    \"sort\": 2,\n                    \"value_type\": \"cumulative\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"show\": true\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"Bps\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": false\n                    }\n                  ]\n                }\n              ],\n              \"title\": \"All processes network I/O\",\n              \"type\": \"row\"\n            }\n          ],\n          \"refresh\": \"5m\",\n          \"schemaVersion\": 18,\n          \"style\": \"dark\",\n          \"tags\": [\n            \"kubernetes\"\n          ],\n          \"templating\": {\n            \"list\": [\n              {\n                \"current\": {\n                  \"text\": \"prometheus\",\n                  \"value\": \"prometheus\"\n                },\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Prometheus datasource\",\n          
      \"multi\": false,\n                \"name\": \"DS_PROMETHEUS\",\n                \"options\": [],\n                \"query\": \"prometheus\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"type\": \"datasource\"\n              },\n              {\n                \"allValue\": \".*\",\n                \"current\": {\n                  \"text\": \"All\",\n                  \"value\": \"$__all\"\n                },\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"definition\": \"\",\n                \"hide\": 0,\n                \"includeAll\": true,\n                \"label\": null,\n                \"multi\": false,\n                \"name\": \"Node\",\n                \"options\": [],\n                \"query\": \"label_values(kubernetes_io_hostname)\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"sort\": 0,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": \"now-5m\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n            \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ]\n          },\n          \"timezone\": \"browser\",\n          \"title\": 
\"Container Metrics (cAdvisor)\",\n          \"version\": 1\n        }\n...\n"
  },
  {
    "path": "values_overrides/grafana/coredns.yaml",
    "content": "# NOTE(srwilkers): This overrides file provides a reference for a dashboard for\n# CoreDNS\n---\nconf:\n  dashboards:\n    kubernetes:\n      coredns: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"prometheus\",\n              \"description\": \"\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"4.4.3\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.0.0\"\n            }\n          ],\n          \"annotations\": {\n            \"list\": [\n              {\n                \"builtIn\": 1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n                \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              }\n            ]\n          },\n          \"description\": \"A dashboard for the CoreDNS DNS server.\",\n          \"overwrite\": true,\n          \"editable\": true,\n          \"gnetId\": 5926,\n          \"graphTooltip\": 0,\n          \"id\": 20,\n          \"links\": [],\n          \"panels\": [\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n         
     \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 0,\n                \"y\": 0\n              },\n              \"id\": 1,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"total\",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(coredns_dns_request_count_total{instance=~\\\"$instance\\\"}[5m])) by (proto)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{proto}}\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"sum(rate(coredns_dns_request_count_total{instance=~\\\"$instance\\\"}[5m]))\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"total\",\n                  \"refId\": \"B\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n           
   \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Requests (total)\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"pps\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"pps\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 8,\n                \"y\": 0\n              },\n              \"id\": 12,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n       
       \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"total\",\n                  \"yaxis\": 2\n                },\n                {\n                  \"alias\": \"other\",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(coredns_dns_request_type_count_total{instance=~\\\"$instance\\\"}[5m])) by (type)\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{type}}\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Requests (by qtype)\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"pps\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"pps\",\n                  \"logBase\": 1,\n               
   \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 16,\n                \"y\": 0\n              },\n              \"id\": 2,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"total\",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(coredns_dns_request_count_total{instance=~\\\"$instance\\\"}[5m])) by (zone)\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{zone}}\",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                
},\n                {\n                  \"expr\": \"sum(rate(coredns_dns_request_count_total{instance=~\\\"$instance\\\"}[5m]))\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"total\",\n                  \"refId\": \"B\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Requests (by zone)\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"pps\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"pps\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 0,\n                
\"y\": 7\n              },\n              \"id\": 10,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"total\",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(coredns_dns_request_do_count_total{instance=~\\\"$instance\\\"}[5m]))\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"DO\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                },\n                {\n                  \"expr\": \"sum(rate(coredns_dns_request_count_total{instance=~\\\"$instance\\\"}[5m]))\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"total\",\n                  \"refId\": \"B\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Requests (DO bit)\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n           
   \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"pps\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"pps\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 12,\n                \"y\": 7\n              },\n              \"id\": 9,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n         
         \"alias\": \"tcp:90\",\n                  \"yaxis\": 2\n                },\n                {\n                  \"alias\": \"tcp:99 \",\n                  \"yaxis\": 2\n                },\n                {\n                  \"alias\": \"tcp:50\",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"histogram_quantile(0.99, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\\\"$instance\\\",proto=\\\"udp\\\"}[5m])) by (le,proto))\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{proto}}:99 \",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"histogram_quantile(0.90, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\\\"$instance\\\",proto=\\\"udp\\\"}[5m])) by (le,proto))\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{proto}}:90\",\n                  \"refId\": \"B\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"histogram_quantile(0.50, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\\\"$instance\\\",proto=\\\"udp\\\"}[5m])) by (le,proto))\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{proto}}:50\",\n                  \"refId\": \"C\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Requests (size, udp)\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              
\"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 18,\n                \"y\": 7\n              },\n              \"id\": 14,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              
\"seriesOverrides\": [\n                {\n                  \"alias\": \"tcp:90\",\n                  \"yaxis\": 1\n                },\n                {\n                  \"alias\": \"tcp:99 \",\n                  \"yaxis\": 1\n                },\n                {\n                  \"alias\": \"tcp:50\",\n                  \"yaxis\": 1\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"histogram_quantile(0.99, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\\\"$instance\\\",proto=\\\"tcp\\\"}[5m])) by (le,proto))\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{proto}}:99 \",\n                  \"refId\": \"A\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"histogram_quantile(0.90, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\\\"$instance\\\",proto=\\\"tcp\\\"}[5m])) by (le,proto))\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{proto}}:90\",\n                  \"refId\": \"B\",\n                  \"step\": 60\n                },\n                {\n                  \"expr\": \"histogram_quantile(0.50, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\\\"$instance\\\",proto=\\\"tcp\\\"}[5m])) by (le,proto))\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{proto}}:50\",\n                  \"refId\": \"C\",\n                  \"step\": 60\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Requests (size,tcp)\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": 
\"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 14\n              },\n              \"id\": 5,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              
\"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(coredns_dns_response_rcode_count_total{instance=~\\\"$instance\\\"}[5m])) by (rcode)\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{rcode}}\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Responses (by rcode)\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"pps\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              
\"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 14\n              },\n              \"id\": 3,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"histogram_quantile(0.99, sum(rate(coredns_dns_request_duration_seconds_bucket{instance=~\\\"$instance\\\"}[5m])) by (le, job))\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"99%\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                },\n                {\n                  \"expr\": \"histogram_quantile(0.90, sum(rate(coredns_dns_request_duration_seconds_bucket{instance=~\\\"$instance\\\"}[5m])) by (le))\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"90%\",\n                  \"refId\": \"B\",\n                  \"step\": 40\n                },\n                {\n                  \"expr\": \"histogram_quantile(0.50, sum(rate(coredns_dns_request_duration_seconds_bucket{instance=~\\\"$instance\\\"}[5m])) by (le))\",\n                  
\"intervalFactor\": 2,\n                  \"legendFormat\": \"50%\",\n                  \"refId\": \"C\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Responses (duration)\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"s\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 21\n              },\n              \"id\": 8,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": 
false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"udp:50%\",\n                  \"yaxis\": 1\n                },\n                {\n                  \"alias\": \"tcp:50%\",\n                  \"yaxis\": 2\n                },\n                {\n                  \"alias\": \"tcp:90%\",\n                  \"yaxis\": 2\n                },\n                {\n                  \"alias\": \"tcp:99%\",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"histogram_quantile(0.99, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\\\"$instance\\\",proto=\\\"udp\\\"}[5m])) by (le,proto)) \",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{proto}}:99%\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                },\n                {\n                  \"expr\": \"histogram_quantile(0.90, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\\\"$instance\\\",proto=\\\"udp\\\"}[5m])) by (le,proto)) \",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{proto}}:90%\",\n                  \"refId\": \"B\",\n                  \"step\": 40\n                },\n                {\n                  \"expr\": 
\"histogram_quantile(0.50, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\\\"$instance\\\",proto=\\\"udp\\\"}[5m])) by (le,proto)) \",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{proto}}:50%\",\n                  \"metric\": \"\",\n                  \"refId\": \"C\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Responses (size, udp)\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n      
          \"w\": 12,\n                \"x\": 12,\n                \"y\": 21\n              },\n              \"id\": 13,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"udp:50%\",\n                  \"yaxis\": 1\n                },\n                {\n                  \"alias\": \"tcp:50%\",\n                  \"yaxis\": 1\n                },\n                {\n                  \"alias\": \"tcp:90%\",\n                  \"yaxis\": 1\n                },\n                {\n                  \"alias\": \"tcp:99%\",\n                  \"yaxis\": 1\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"histogram_quantile(0.99, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\\\"$instance\\\",proto=\\\"tcp\\\"}[5m])) by (le,proto)) \",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{proto}}:99%\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                },\n                {\n                  \"expr\": \"histogram_quantile(0.90, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\\\"$instance\\\",proto=\\\"tcp\\\"}[5m])) by (le,proto)) \",\n                  \"intervalFactor\": 2,\n  
                \"legendFormat\": \"{{proto}}:90%\",\n                  \"refId\": \"B\",\n                  \"step\": 40\n                },\n                {\n                  \"expr\": \"histogram_quantile(0.50, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\\\"$instance\\\",proto=\\\"tcp\\\"}[5m])) by (le, proto)) \",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{proto}}:50%\",\n                  \"metric\": \"\",\n                  \"refId\": \"C\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Responses (size, tcp)\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": 
\"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 28\n              },\n              \"id\": 15,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(coredns_cache_size{instance=~\\\"$instance\\\"}) by (type)\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{type}}\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Cache (size)\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                
\"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 28\n              },\n              \"id\": 16,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"misses\",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": 
false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(coredns_cache_hits_total{instance=~\\\"$instance\\\"}[5m])) by (type)\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"hits:{{type}}\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                },\n                {\n                  \"expr\": \"sum(rate(coredns_cache_misses_total{instance=~\\\"$instance\\\"}[5m])) by (type)\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"misses\",\n                  \"refId\": \"B\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Cache (hitrate)\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"pps\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"pps\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            }\n          ],\n          \"schemaVersion\": 18,\n          
\"style\": \"dark\",\n          \"tags\": [\n            \"dns\",\n            \"coredns\"\n          ],\n          \"templating\": {\n            \"list\": [\n              {\n                \"current\": {\n                  \"text\": \"prometheus\",\n                  \"value\": \"prometheus\"\n                },\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Prometheus datasource\",\n                \"multi\": false,\n                \"name\": \"DS_PROMETHEUS\",\n                \"options\": [],\n                \"query\": \"prometheus\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"type\": \"datasource\"\n              },\n              {\n                \"allValue\": \".*\",\n                \"current\": {\n                  \"text\": \"All\",\n                  \"value\": \"$__all\"\n                },\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"definition\": \"\",\n                \"hide\": 0,\n                \"includeAll\": true,\n                \"label\": \"Instance\",\n                \"multi\": false,\n                \"name\": \"instance\",\n                \"options\": [],\n                \"query\": \"up{job=\\\"coredns\\\"}\",\n                \"refresh\": 1,\n                \"regex\": \".*instance=\\\"(.*?)\\\".*\",\n                \"skipUrlSync\": false,\n                \"sort\": 0,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": \"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n            \"now\": true,\n            \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n       
       \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ]\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"CoreDNS\",\n          \"version\": 1\n        }\n...\n"
  },
  {
    "path": "values_overrides/grafana/elasticsearch.yaml",
    "content": "# NOTE(srwilkers): This overrides file provides a reference for a dashboard for\n# an Elasticsearch cluster\n---\nconf:\n  dashboards:\n    lma:\n      elasticsearch: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"prometheus\",\n              \"description\": \"\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"4.6.3\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.0.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"singlestat\",\n              \"name\": \"Singlestat\",\n              \"version\": \"\"\n            }\n          ],\n          \"annotations\": {\n            \"list\": [\n              {\n                \"builtIn\": 1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n                \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              }\n            ]\n          },\n          \"description\": \"Elasticsearch detailed dashboard\",\n          \"overwrite\": true,\n          \"editable\": true,\n          \"gnetId\": 4358,\n          \"graphTooltip\": 1,\n          \"id\": 23,\n          \"links\": [],\n          \"panels\": [\n            {\n          
    \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 0\n              },\n              \"id\": 50,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Cluster\",\n              \"type\": \"row\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(178, 49, 13, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 10,\n                \"x\": 0,\n                \"y\": 1\n              },\n              \"height\": \"50\",\n              \"id\": 8,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n      
        \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"(sum(elasticsearch_cluster_health_status{cluster=~\\\"$cluster\\\",color=\\\"green\\\"})*2)+sum(elasticsearch_cluster_health_status{cluster=~\\\"$cluster\\\",color=\\\"yellow\\\"})\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 3,\n                  \"legendFormat\": \"\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": \"0,1,2\",\n              \"title\": \"Cluster health status\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"GREEN\",\n                  \"value\": \"2\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"YELLOW\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"RED\",\n                  \"value\": \"0\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n       
         \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 4,\n                \"x\": 10,\n                \"y\": 1\n              },\n              \"height\": \"50\",\n              \"id\": 10,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": 
\"sum(elasticsearch_cluster_health_number_of_nodes{cluster=~\\\"$cluster\\\"})\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Nodes\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 4,\n                \"x\": 14,\n                \"y\": 1\n              },\n              \"height\": \"50\",\n              \"id\": 9,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                
},\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"elasticsearch_cluster_health_number_of_data_nodes{cluster=\\\"$cluster\\\"}\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Data nodes\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                
\"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 6,\n                \"x\": 18,\n                \"y\": 1\n              },\n              \"height\": \"50\",\n              \"hideTimeOverride\": true,\n              \"id\": 16,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": 
[\n                {\n                  \"expr\": \"elasticsearch_cluster_health_number_of_pending_tasks{cluster=\\\"$cluster\\\"}\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Pending tasks\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 4\n              },\n              \"id\": 51,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Shards\",\n              \"type\": \"row\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              
\"gridPos\": {\n                \"h\": 3,\n                \"w\": 4,\n                \"x\": 0,\n                \"y\": 5\n              },\n              \"height\": \"50\",\n              \"id\": 11,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"maxPerRow\": 6,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"repeat\": \"shard_type\",\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"elasticsearch_cluster_health_active_primary_shards{cluster=\\\"$cluster\\\"}\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"active primary shards\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              
\"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 4,\n                \"x\": 4,\n                \"y\": 5\n              },\n              \"height\": \"50\",\n              \"id\": 39,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"maxPerRow\": 6,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": 
\"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"elasticsearch_cluster_health_active_shards{cluster=\\\"$cluster\\\"}\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"active shards\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 4,\n                \"x\": 8,\n               
 \"y\": 5\n              },\n              \"height\": \"50\",\n              \"id\": 40,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"maxPerRow\": 6,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"elasticsearch_cluster_health_initializing_shards{cluster=\\\"$cluster\\\"}\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"initializing shards\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n  
            ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 4,\n                \"x\": 12,\n                \"y\": 5\n              },\n              \"height\": \"50\",\n              \"id\": 41,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"maxPerRow\": 6,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                
\"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"elasticsearch_cluster_health_relocating_shards{cluster=\\\"$cluster\\\"}\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"relocating shards\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 4,\n                \"x\": 16,\n                \"y\": 5\n              },\n              \"height\": \"50\",\n              \"id\": 42,\n              \"interval\": null,\n              \"links\": [],\n     
         \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"maxPerRow\": 6,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"elasticsearch_cluster_health_unassigned_shards{cluster=\\\"$cluster\\\"}\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 40\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"unassigned shards\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n         
       \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 8\n              },\n              \"id\": 52,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"System\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 6,\n                \"x\": 0,\n                \"y\": 9\n              },\n              \"height\": \"400\",\n              \"id\": 30,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"elasticsearch_process_cpu_percent{cluster=\\\"$cluster\\\",es_master_node=\\\"true\\\",name=~\\\"$node\\\"}\",\n                  
\"format\": \"time_series\",\n                  \"instant\": false,\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} -  master\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                },\n                {\n                  \"expr\": \"elasticsearch_process_cpu_percent{cluster=\\\"$cluster\\\",es_data_node=\\\"true\\\",name=~\\\"$node\\\"}\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} -  data\",\n                  \"metric\": \"\",\n                  \"refId\": \"B\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"CPU usage\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"percent\",\n                  \"label\": \"CPU usage\",\n                  \"logBase\": 1,\n                  \"max\": 100,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n      
          }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 0,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 6,\n                \"x\": 6,\n                \"y\": 9\n              },\n              \"height\": \"400\",\n              \"id\": 31,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"elasticsearch_jvm_memory_used_bytes{cluster=\\\"$cluster\\\",name=~\\\"$node\\\",name=~\\\"$node\\\"}\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  
\"legendFormat\": \"{{ name }} - used: {{area}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                },\n                {\n                  \"expr\": \"elasticsearch_jvm_memory_committed_bytes{cluster=\\\"$cluster\\\",name=~\\\"$node\\\",name=~\\\"$node\\\"}\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - committed: {{area}}\",\n                  \"refId\": \"B\",\n                  \"step\": 10\n                },\n                {\n                  \"expr\": \"elasticsearch_jvm_memory_max_bytes{cluster=\\\"$cluster\\\",name=~\\\"$node\\\",name=~\\\"$node\\\"}\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - max: {{area}}\",\n                  \"refId\": \"C\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"JVM memory usage\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": \"Memory\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": 
\"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 6,\n                \"x\": 12,\n                \"y\": 9\n              },\n              \"height\": \"400\",\n              \"id\": 32,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": 
\"1-(elasticsearch_filesystem_data_available_bytes{cluster=\\\"$cluster\\\"}/elasticsearch_filesystem_data_size_bytes{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"})\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - {{path}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [\n                {\n                  \"colorMode\": \"custom\",\n                  \"fill\": true,\n                  \"fillColor\": \"rgba(216, 200, 27, 0.27)\",\n                  \"op\": \"gt\",\n                  \"value\": 0.8\n                },\n                {\n                  \"colorMode\": \"custom\",\n                  \"fill\": true,\n                  \"fillColor\": \"rgba(234, 112, 112, 0.22)\",\n                  \"op\": \"gt\",\n                  \"value\": 0.9\n                }\n              ],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Disk usage\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"percentunit\",\n                  \"label\": \"Disk Usage %\",\n                  \"logBase\": 1,\n                  \"max\": 1,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  
\"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 6,\n                \"x\": 18,\n                \"y\": 9\n              },\n              \"height\": \"400\",\n              \"id\": 47,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"sort\": \"max\",\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"/sent$/\",\n                  \"transform\": \"negative-Y\"\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": 
false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"irate(elasticsearch_transport_tx_size_bytes_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - sent\",\n                  \"refId\": \"D\",\n                  \"step\": 10\n                },\n                {\n                  \"expr\": \"irate(elasticsearch_transport_rx_size_bytes_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - received\",\n                  \"refId\": \"C\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Network usage\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"Bps\",\n                  \"label\": \"Bytes/sec\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"pps\",\n                  \"label\": \"\",\n                  \"logBase\": 1,\n                  \"max\": null,\n        
          \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 19\n              },\n              \"id\": 53,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Documents\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 6,\n                \"x\": 0,\n                \"y\": 20\n              },\n              \"height\": \"400\",\n              \"id\": 1,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              
\"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"elasticsearch_indices_docs{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Documents count\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": \"Documents\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n 
             \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 6,\n                \"x\": 6,\n                \"y\": 20\n              },\n              \"height\": \"400\",\n              \"id\": 24,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"irate(elasticsearch_indices_indexing_index_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n        
      \"timeShift\": null,\n              \"title\": \"Documents indexed rate\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": \"index calls/s\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 6,\n                \"x\": 12,\n                \"y\": 20\n              },\n              \"height\": \"400\",\n              \"id\": 25,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": 
false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"rate(elasticsearch_indices_docs_deleted{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Documents deleted rate\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": \"Documents/s\",\n        
          \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 6,\n                \"x\": 18,\n                \"y\": 20\n              },\n              \"height\": \"400\",\n              \"id\": 26,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              
\"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"rate(elasticsearch_indices_merges_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Documents merged rate\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": \"Documents/s\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n            
    \"w\": 24,\n                \"x\": 0,\n                \"y\": 30\n              },\n              \"id\": 54,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Total Operations stats\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 31\n              },\n              \"height\": \"400\",\n              \"id\": 48,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"sort\": \"avg\",\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": 
\"irate(elasticsearch_indices_indexing_index_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - indexing\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 4\n                },\n                {\n                  \"expr\": \"irate(elasticsearch_indices_search_query_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - query\",\n                  \"refId\": \"B\",\n                  \"step\": 4\n                },\n                {\n                  \"expr\": \"irate(elasticsearch_indices_search_fetch_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - fetch\",\n                  \"refId\": \"C\",\n                  \"step\": 4\n                },\n                {\n                  \"expr\": \"irate(elasticsearch_indices_merges_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - merges\",\n                  \"refId\": \"D\",\n                  \"step\": 4\n                },\n                {\n                  \"expr\": \"irate(elasticsearch_indices_refresh_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - refresh\",\n                  \"refId\": \"E\",\n                  \"step\": 4\n         
       },\n                {\n                  \"expr\": \"irate(elasticsearch_indices_flush_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - flush\",\n                  \"refId\": \"F\",\n                  \"step\": 4\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Total Operations  rate\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": \"Operations/s\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              
\"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 31\n              },\n              \"height\": \"400\",\n              \"id\": 49,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"sort\": \"avg\",\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"irate(elasticsearch_indices_indexing_index_time_seconds_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - indexing\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 4\n                },\n                {\n                  \"expr\": 
\"irate(elasticsearch_indices_search_query_time_ms_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - query\",\n                  \"refId\": \"B\",\n                  \"step\": 4\n                },\n                {\n                  \"expr\": \"irate(elasticsearch_indices_search_fetch_time_ms_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - fetch\",\n                  \"refId\": \"C\",\n                  \"step\": 4\n                },\n                {\n                  \"expr\": \"irate(elasticsearch_indices_merges_total_time_ms_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - merges\",\n                  \"refId\": \"D\",\n                  \"step\": 4\n                },\n                {\n                  \"expr\": \"irate(elasticsearch_indices_refresh_total_time_ms_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - refresh\",\n                  \"refId\": \"E\",\n                  \"step\": 4\n                },\n                {\n                  \"expr\": \"irate(elasticsearch_indices_flush_time_ms_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ name }} - flush\",\n                  \"refId\": \"F\",\n                  \"step\": 4\n                }\n              ],\n        
      \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Total Operations  time\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"ms\",\n                  \"label\": \"Time\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 41\n              },\n              \"id\": 55,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Times\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n        
      \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 8,\n                \"x\": 0,\n                \"y\": 42\n              },\n              \"height\": \"400\",\n              \"id\": 33,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"rate(elasticsearch_indices_search_query_time_seconds{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval]) \",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 4\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Query time\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                
\"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"ms\",\n                  \"label\": \"Time\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 8,\n                \"x\": 8,\n                \"y\": 42\n              },\n              \"height\": \"400\",\n              \"id\": 5,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": 
\"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"rate(elasticsearch_indices_indexing_index_time_seconds_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 4\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Indexing time\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"ms\",\n                  \"label\": \"Time\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n         
       }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 8,\n                \"x\": 16,\n                \"y\": 42\n              },\n              \"height\": \"400\",\n              \"id\": 3,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"rate(elasticsearch_indices_merges_total_time_seconds_total{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 4\n              
  }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Merging time\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"s\",\n                  \"label\": \"Time\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ]\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 52\n              },\n              \"id\": 56,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Caches\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 
6,\n                \"x\": 0,\n                \"y\": 53\n              },\n              \"height\": \"400\",\n              \"id\": 4,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"elasticsearch_indices_fielddata_memory_size_bytes{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Field data memory size\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                
\"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": \"Memory\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 6,\n                \"x\": 6,\n                \"y\": 53\n              },\n              \"height\": \"400\",\n              \"id\": 34,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              
\"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"rate(elasticsearch_indices_fielddata_evictions{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Field data evictions\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": \"Evictions/s\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n      
        \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 6,\n                \"x\": 12,\n                \"y\": 53\n              },\n              \"height\": \"400\",\n              \"id\": 35,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"elasticsearch_indices_query_cache_memory_size_bytes{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              
\"title\": \"Query cache size\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": \"Size\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 6,\n                \"x\": 18,\n                \"y\": 53\n              },\n              \"height\": \"400\",\n              \"id\": 36,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": 
false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"rate(elasticsearch_indices_query_cache_evictions{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Query cache evictions\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": \"Evictions/s\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": 
\"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ]\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 63\n              },\n              \"id\": 57,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Thread Pool\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 19,\n                \"w\": 6,\n                \"x\": 0,\n                \"y\": 64\n              },\n              \"id\": 45,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": false,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"sort\": \"avg\",\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              
\"targets\": [\n                {\n                  \"expr\": \" irate(elasticsearch_thread_pool_rejected_count{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}} - {{ type }}\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Thread Pool operations rejected\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 19,\n             
   \"w\": 6,\n                \"x\": 6,\n                \"y\": 64\n              },\n              \"id\": 46,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": false,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"sort\": \"avg\",\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"elasticsearch_thread_pool_queue_count{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}} - {{ type }}\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Thread Pool operations queued\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                
\"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 19,\n                \"w\": 6,\n                \"x\": 12,\n                \"y\": 64\n              },\n              \"height\": \"\",\n              \"id\": 43,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": false,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"sort\": \"avg\",\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n             
 \"targets\": [\n                {\n                  \"expr\": \"elasticsearch_thread_pool_active_count{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}} - {{ type }}\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Thread Pool threads active\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 19,\n                \"w\": 6,\n           
     \"x\": 18,\n                \"y\": 64\n              },\n              \"id\": 44,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": false,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"sort\": \"avg\",\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"irate(elasticsearch_thread_pool_completed_count{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}} - {{ type }}\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Thread Pool operations completed\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                
\"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 83\n              },\n              \"id\": 58,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"JVM Garbage Collection\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 84\n              },\n              \"height\": \"400\",\n              \"id\": 7,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n       
       \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"rate(elasticsearch_jvm_gc_collection_seconds_count{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}} - {{gc}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 4\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"GC count\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": \"GCs\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 
1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 84\n              },\n              \"height\": \"400\",\n              \"id\": 27,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"rate(elasticsearch_jvm_gc_collection_seconds_sum{cluster=\\\"$cluster\\\",name=~\\\"$node\\\"}[$interval])\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{name}} - 
{{gc}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 4\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"GC time\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"s\",\n                  \"label\": \"Time\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ]\n            }\n          ],\n          \"refresh\": \"5m\",\n          \"schemaVersion\": 18,\n          \"style\": \"dark\",\n          \"tags\": [\n            \"elasticsearch\",\n            \"App\"\n          ],\n          \"templating\": {\n            \"list\": [\n              {\n                \"auto\": true,\n                \"auto_count\": 30,\n                \"auto_min\": \"10s\",\n                \"current\": {\n                  \"text\": \"auto\",\n                  \"value\": \"$__auto_interval_interval\"\n                },\n                \"hide\": 0,\n                \"label\": \"Interval\",\n                \"name\": \"interval\",\n                \"options\": 
[\n                  {\n                    \"selected\": true,\n                    \"text\": \"auto\",\n                    \"value\": \"$__auto_interval_interval\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"1m\",\n                    \"value\": \"1m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"10m\",\n                    \"value\": \"10m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"30m\",\n                    \"value\": \"30m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"1h\",\n                    \"value\": \"1h\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"6h\",\n                    \"value\": \"6h\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"12h\",\n                    \"value\": \"12h\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"1d\",\n                    \"value\": \"1d\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"7d\",\n                    \"value\": \"7d\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"14d\",\n                    \"value\": \"14d\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"30d\",\n                    \"value\": \"30d\"\n                  }\n                ],\n                \"query\": \"1m,10m,30m,1h,6h,12h,1d,7d,14d,30d\",\n                \"refresh\": 2,\n                \"skipUrlSync\": false,\n         
       \"type\": \"interval\"\n              },\n              {\n                \"current\": {\n                  \"text\": \"prometheus\",\n                  \"value\": \"prometheus\"\n                },\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Prometheus datasource\",\n                \"multi\": false,\n                \"name\": \"DS_PROMETHEUS\",\n                \"options\": [],\n                \"query\": \"prometheus\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"type\": \"datasource\"\n              },\n              {\n                \"allValue\": null,\n                \"current\": {},\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"definition\": \"\",\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Instance\",\n                \"multi\": false,\n                \"name\": \"cluster\",\n                \"options\": [],\n                \"query\": \"label_values(elasticsearch_cluster_health_status,cluster)\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"sort\": 1,\n                \"tagValuesQuery\": null,\n                \"tags\": [],\n                \"tagsQuery\": null,\n                \"type\": \"query\",\n                \"useTags\": false\n              },\n              {\n                \"allValue\": null,\n                \"current\": {\n                  \"text\": \"All\",\n                  \"value\": \"$__all\"\n                },\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"definition\": \"\",\n                \"hide\": 0,\n                \"includeAll\": true,\n                \"label\": \"node\",\n                \"multi\": true,\n                \"name\": \"node\",\n                \"options\": [],\n             
   \"query\": \"label_values(elasticsearch_process_cpu_percent,name)\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"sort\": 1,\n                \"tagValuesQuery\": null,\n                \"tags\": [],\n                \"tagsQuery\": null,\n                \"type\": \"query\",\n                \"useTags\": false\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": \"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n            \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ]\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"Elasticsearch\",\n          \"version\": 1\n        }\n...\n"
  },
  {
    "path": "values_overrides/grafana/gateway.yaml",
    "content": "# Gateway API overrides for Grafana.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  grafana:\n    host_fqdn_override:\n      public:\n        host: grafana.openstack-helm.org\n\nmanifests:\n  ingress: false\n  service_ingress: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: grafana-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.grafana.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: grafana-dashboard\n              port: 3000\n...\n"
  },
  {
    "path": "values_overrides/grafana/home_dashboard.yaml",
    "content": "# This override file provides a reference for dashboards for\n# customized OSH Welcome Page\n---\nconf:\n  dashboards:\n    home:\n      home_dashboard: |-\n        {\n          \"annotations\": {\n            \"list\": [\n              {\n                \"builtIn\": 1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n                \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              }\n            ]\n          },\n          \"editable\": true,\n          \"gnetId\": null,\n          \"graphTooltip\": 0,\n          \"id\": 66,\n          \"links\": [],\n          \"panels\": [\n            {\n              \"content\": \"<div class=\\\"text-center dashboard-header\\\">\\n  <span>OSH Home Dashboard</span>\\n</div>\",\n              \"editable\": true,\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 0\n              },\n              \"id\": 1,\n              \"links\": [],\n              \"mode\": \"html\",\n              \"options\": {},\n              \"style\": {},\n              \"title\": \"\",\n              \"transparent\": true,\n              \"type\": \"text\"\n            },\n            {\n              \"folderId\": 0,\n              \"gridPos\": {\n                \"h\": 10,\n                \"w\": 13,\n                \"x\": 6,\n                \"y\": 3\n              },\n              \"headings\": true,\n              \"id\": 3,\n              \"limit\": 30,\n              \"links\": [],\n              \"options\": {},\n              \"query\": \"\",\n              \"recent\": true,\n              \"search\": false,\n              \"starred\": true,\n              \"tags\": [],\n              \"title\": \"\",\n              \"type\": \"dashlist\"\n            }\n          ],\n   
       \"schemaVersion\": 18,\n          \"style\": \"dark\",\n          \"tags\": [],\n          \"templating\": {\n            \"list\": []\n          },\n          \"time\": {\n            \"from\": \"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n            \"hidden\": true,\n            \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ],\n            \"type\": \"timepicker\"\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"OSH Home\",\n          \"version\": 1\n        }\n...\n"
  },
  {
    "path": "values_overrides/grafana/kubernetes.yaml",
    "content": "# NOTE(srwilkers): This overrides file provides a reference for dashboards that\n# reflect the overall state of a Kubernetes deployment\n---\nconf:\n  dashboards:\n    kubernetes:\n      kubernetes_capacity_planning: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"prometheus\",\n              \"description\": \"\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"4.4.1\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.0.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"singlestat\",\n              \"name\": \"Singlestat\",\n              \"version\": \"\"\n            }\n          ],\n          \"annotations\": {\n            \"list\": [\n              {\n                \"builtIn\": 1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n                \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              }\n            ]\n          },\n          \"description\": \"\",\n          \"overwrite\": true,\n          \"editable\": false,\n          \"gnetId\": 22,\n          \"graphTooltip\": 0,\n          \"id\": 35,\n          \"links\": [],\n          \"panels\": [\n        
    {\n              \"alerting\": {},\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 0\n              },\n              \"id\": 3,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(node_cpu{mode=\\\"idle\\\"}[2m])) * 100\",\n                  \"hide\": false,\n                  \"intervalFactor\": 10,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 50\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Idle cpu\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                
\"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"percent\",\n                  \"label\": \"cpu usage\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"alerting\": {},\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 0\n              },\n              \"id\": 9,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              
\"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(node_load1)\",\n                  \"intervalFactor\": 4,\n                  \"legendFormat\": \"load 1m\",\n                  \"refId\": \"A\",\n                  \"step\": 20,\n                  \"target\": \"\"\n                },\n                {\n                  \"expr\": \"sum(node_load5)\",\n                  \"intervalFactor\": 4,\n                  \"legendFormat\": \"load 5m\",\n                  \"refId\": \"B\",\n                  \"step\": 20,\n                  \"target\": \"\"\n                },\n                {\n                  \"expr\": \"sum(node_load15)\",\n                  \"intervalFactor\": 4,\n                  \"legendFormat\": \"load 15m\",\n                  \"refId\": \"C\",\n                  \"step\": 20,\n                  \"target\": \"\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"System load\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"percentunit\",\n                  \"label\": null,\n      
            \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"alerting\": {},\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 18,\n                \"x\": 0,\n                \"y\": 7\n              },\n              \"id\": 4,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"node_memory_SwapFree{instance=\\\"172.17.0.1:9100\\\",job=\\\"prometheus\\\"}\",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              
\"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(node_memory_MemTotal) - sum(node_memory_MemFree) - sum(node_memory_Buffers) - sum(node_memory_Cached)\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"memory usage\",\n                  \"metric\": \"memo\",\n                  \"refId\": \"A\",\n                  \"step\": 10,\n                  \"target\": \"\"\n                },\n                {\n                  \"expr\": \"sum(node_memory_Buffers)\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"memory buffers\",\n                  \"metric\": \"memo\",\n                  \"refId\": \"B\",\n                  \"step\": 10,\n                  \"target\": \"\"\n                },\n                {\n                  \"expr\": \"sum(node_memory_Cached)\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"memory cached\",\n                  \"metric\": \"memo\",\n                  \"refId\": \"C\",\n                  \"step\": 10,\n                  \"target\": \"\"\n                },\n                {\n                  \"expr\": \"sum(node_memory_MemFree)\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"memory free\",\n                  \"metric\": \"memo\",\n                  \"refId\": \"D\",\n                  \"step\": 10,\n                  \"target\": \"\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Memory usage\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n      
          \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"percent\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 18,\n                \"y\": 7\n              },\n              \"id\": 5,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 
1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"((sum(node_memory_MemTotal) - sum(node_memory_MemFree) - sum(node_memory_Buffers) - sum(node_memory_Cached)) / sum(node_memory_MemTotal)) * 100\",\n                  \"intervalFactor\": 2,\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 60,\n                  \"target\": \"\"\n                }\n              ],\n              \"thresholds\": \"80, 90\",\n              \"title\": \"Memory usage\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"avg\"\n            },\n            {\n              \"alerting\": {},\n 
             \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 18,\n                \"x\": 0,\n                \"y\": 14\n              },\n              \"id\": 6,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"read\",\n                  \"yaxis\": 1\n                },\n                {\n                  \"alias\": \"{instance=\\\"172.17.0.1:9100\\\"}\",\n                  \"yaxis\": 2\n                },\n                {\n                  \"alias\": \"io time\",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(node_disk_bytes_read[5m]))\",\n                  \"hide\": false,\n                  \"intervalFactor\": 4,\n                  \"legendFormat\": \"read\",\n                  \"refId\": \"A\",\n                  \"step\": 20,\n                  \"target\": \"\"\n                
},\n                {\n                  \"expr\": \"sum(rate(node_disk_bytes_written[5m]))\",\n                  \"intervalFactor\": 4,\n                  \"legendFormat\": \"written\",\n                  \"refId\": \"B\",\n                  \"step\": 20\n                },\n                {\n                  \"expr\": \"sum(rate(node_disk_io_time_ms[5m]))\",\n                  \"intervalFactor\": 4,\n                  \"legendFormat\": \"io time\",\n                  \"refId\": \"C\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Disk I/O\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"ms\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              
\"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"percentunit\",\n              \"gauge\": {\n                \"maxValue\": 1,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 18,\n                \"y\": 14\n              },\n              \"id\": 12,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n   
             {\n                  \"expr\": \"(sum(node_filesystem_size{device!=\\\"rootfs\\\"}) - sum(node_filesystem_free{device!=\\\"rootfs\\\"})) / sum(node_filesystem_size{device!=\\\"rootfs\\\"})\",\n                  \"intervalFactor\": 2,\n                  \"refId\": \"A\",\n                  \"step\": 60,\n                  \"target\": \"\"\n                }\n              ],\n              \"thresholds\": \"0.75, 0.9\",\n              \"title\": \"Disk space usage\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"alerting\": {},\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 21\n              },\n              \"id\": 8,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              
\"seriesOverrides\": [\n                {\n                  \"alias\": \"transmitted \",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(node_network_receive_bytes{device!~\\\"lo\\\"}[5m]))\",\n                  \"hide\": false,\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 10,\n                  \"target\": \"\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Network received\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n      
      },\n            {\n              \"alerting\": {},\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 21\n              },\n              \"id\": 10,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"transmitted \",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(node_network_transmit_bytes{device!~\\\"lo\\\"}[5m]))\",\n                  \"hide\": false,\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"B\",\n                  \"step\": 10,\n                  \"target\": \"\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": 
null,\n              \"title\": \"Network transmitted\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 18,\n                \"x\": 0,\n                \"y\": 28\n              },\n              \"id\": 11,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n 
             \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(kube_pod_info)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Current number of Pods\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                },\n                {\n                  \"expr\": \"sum(kube_node_status_capacity_pods)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Maximum capacity of pods\",\n                  \"refId\": \"B\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Cluster Pod Utilization\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  
\"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"percent\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 18,\n                \"y\": 28\n              },\n              \"id\": 7,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": 
[\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"100 - (sum(kube_node_status_capacity_pods) - sum(kube_pod_info)) / sum(kube_node_status_capacity_pods) * 100\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 60,\n                  \"target\": \"\"\n                }\n              ],\n              \"thresholds\": \"80,90\",\n              \"title\": \"Pod Utilization\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            }\n          ],\n          \"refresh\": false,\n          \"schemaVersion\": 18,\n          \"style\": \"dark\",\n          \"tags\": [],\n          \"templating\": {\n            \"list\": [\n              {\n                \"current\": {\n                  \"text\": \"prometheus\",\n                  \"value\": \"prometheus\"\n                },\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Prometheus datasource\",\n                \"multi\": false,\n                \"name\": \"DS_PROMETHEUS\",\n                \"options\": [],\n                \"query\": \"prometheus\",\n                
\"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"type\": \"datasource\"\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": \"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n            \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ]\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"Kubernetes Capacity Planning\",\n          \"version\": 1\n        }\n      kubernetes_cluster_status: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"prometheus\",\n              \"description\": \"\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"4.4.1\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.0.0\"\n            },\n            {\n              \"type\": \"panel\",\n    
          \"id\": \"singlestat\",\n              \"name\": \"Singlestat\",\n              \"version\": \"\"\n            }\n          ],\n          \"annotations\": {\n            \"list\": [\n              {\n                \"builtIn\": 1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n                \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              }\n            ]\n          },\n          \"editable\": false,\n          \"overwrite\": true,\n          \"gnetId\": null,\n          \"graphTooltip\": 0,\n          \"id\": 5,\n          \"links\": [],\n          \"panels\": [\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 0\n              },\n              \"id\": 11,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Cluster Health\",\n              \"type\": \"row\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": true,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 4,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 1\n              },\n              \"id\": 5,\n              
\"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum(up{job=~\\\"apiserver|kube-scheduler|kube-controller-manager\\\"} == 0)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 600\n                }\n              ],\n              \"thresholds\": \"1,3\",\n              \"title\": \"Control Plane UP\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"UP\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"total\"\n            },\n            
{\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": true,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 4,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 1\n              },\n              \"id\": 6,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n        
      \"targets\": [\n                {\n                  \"expr\": \"sum(ALERTS{alertstate=\\\"firing\\\",alertname!=\\\"DeadMansSwitch\\\"})\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 600\n                }\n              ],\n              \"thresholds\": \"3,5\",\n              \"title\": \"Alerts Firing\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"0\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 5\n              },\n              \"id\": 12,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Control Plane Status\",\n              \"type\": \"row\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": null,\n              \"format\": \"percent\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 5,\n                \"w\": 6,\n                
\"x\": 0,\n                \"y\": 6\n              },\n              \"id\": 1,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"(sum(up{job=\\\"apiserver\\\"} == 1) / count(up{job=\\\"apiserver\\\"})) * 100\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 600\n                }\n              ],\n              \"thresholds\": \"50,80\",\n              \"title\": \"API Servers UP\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n         
         \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": null,\n              \"format\": \"percent\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 5,\n                \"w\": 6,\n                \"x\": 6,\n                \"y\": 6\n              },\n              \"id\": 2,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n             
   \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"(sum(up{job=\\\"kube-controller-manager-discovery\\\"} == 1) / count(up{job=\\\"kube-controller-manager-discovery\\\"})) * 100\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 600\n                }\n              ],\n              \"thresholds\": \"50,80\",\n              \"title\": \"Controller Managers UP\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": null,\n              \"format\": \"percent\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 5,\n                \"w\": 6,\n                \"x\": 12,\n                \"y\": 6\n              },\n              \"id\": 3,\n              \"interval\": null,\n       
       \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"(sum(up{job=\\\"kube-scheduler-discovery\\\"} == 1) / count(up{job=\\\"kube-scheduler-discovery\\\"})) * 100\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 600\n                }\n              ],\n              \"thresholds\": \"50,80\",\n              \"title\": \"Schedulers UP\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              
\"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": true,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": null,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 5,\n                \"w\": 6,\n                \"x\": 18,\n                \"y\": 6\n              },\n              \"hideTimeOverride\": false,\n              \"id\": 4,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                
\"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"count(increase(kube_pod_container_status_restarts{namespace=~\\\"kube-system|tectonic-system\\\"}[1h]) > 5)\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 600\n                }\n              ],\n              \"thresholds\": \"1,3\",\n              \"title\": \"Crashlooping Control Plane Pods\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"0\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 11\n              },\n              \"id\": 13,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Capacity Planning\",\n              \"type\": \"row\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"format\": \"percent\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": true,\n                
\"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 4,\n                \"w\": 6,\n                \"x\": 0,\n                \"y\": 12\n              },\n              \"id\": 8,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum(100 - (avg by (instance) (rate(node_cpu{job=\\\"node-exporter\\\",mode=\\\"idle\\\"}[5m])) * 100)) / count(node_cpu{job=\\\"node-exporter\\\",mode=\\\"idle\\\"})\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 600\n                }\n              ],\n              \"thresholds\": \"80,90\",\n              
\"title\": \"CPU Utilization\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"avg\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"format\": \"percent\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 4,\n                \"w\": 6,\n                \"x\": 6,\n                \"y\": 12\n              },\n              \"id\": 7,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n       
           \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"((sum(node_memory_MemTotal) - sum(node_memory_MemFree) - sum(node_memory_Buffers) - sum(node_memory_Cached)) / sum(node_memory_MemTotal)) * 100\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 600\n                }\n              ],\n              \"thresholds\": \"80,90\",\n              \"title\": \"Memory Utilization\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"avg\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"format\": \"percent\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 4,\n                
\"w\": 6,\n                \"x\": 12,\n                \"y\": 12\n              },\n              \"id\": 9,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"((sum(node_filesystem_size{device!=\\\"rootfs\\\"}) - sum(node_filesystem_free{device!=\\\"rootfs\\\"})) / sum(node_filesystem_size{device!=\\\"rootfs\\\"})) * 100\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 600\n                }\n              ],\n              \"thresholds\": \"80,90\",\n              \"title\": \"Filesystem Utilization\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n      
            \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"avg\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"format\": \"percent\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 4,\n                \"w\": 6,\n                \"x\": 18,\n                \"y\": 12\n              },\n              \"id\": 10,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": 
\"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"100 - (sum(kube_node_status_capacity_pods) - sum(kube_pod_info)) / sum(kube_node_status_capacity_pods) * 100\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 600\n                }\n              ],\n              \"thresholds\": \"80,90\",\n              \"title\": \"Pod Utilization\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"avg\"\n            }\n          ],\n          \"schemaVersion\": 18,\n          \"style\": \"dark\",\n          \"tags\": [],\n          \"templating\": {\n            \"list\": [\n              {\n                \"current\": {\n                  \"text\": \"prometheus\",\n                  \"value\": \"prometheus\"\n                },\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Prometheus datasource\",\n                \"multi\": false,\n                \"name\": \"DS_PROMETHEUS\",\n                \"options\": [],\n                \"query\": \"prometheus\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"type\": \"datasource\"\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": \"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n      
      \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ]\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"Kubernetes Cluster Status\",\n          \"version\": 1\n        }\n...\n"
  },
  {
    "path": "values_overrides/grafana/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    grafana_db_session_sync: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/grafana/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    grafana_db_session_sync: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/grafana/mariadb-operator.yaml",
    "content": "---\nconf:\n  grafana:\n    database:\n      url: null\n\nmanifests:\n  job_db_init: false\n  job_db_init_session: false\n  secret_db_session: false\n\netcSources:\n  grafana_api:\n    - grafana-db-conn\n  grafana_db_sync:\n    - grafana-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: grafana\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: grafana\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: grafana-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: grafana-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"grafana\"\n      table: \"*\"\n      username: grafana\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: grafana-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: grafana\n      passwordSecretKeyRef:\n        name: grafana-db-password\n        key: password\n      database: grafana\n      secretName: grafana-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ 
.Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/grafana/nginx.yaml",
    "content": "# NOTE(srwilkers): This overrides file provides a reference for a dashboard for\n# nginx\n---\nconf:\n  dashboards:\n    kubernetes:\n      nginx_stats: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"Prometheus\",\n              \"description\": \"\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"5.0.0\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"5.0.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"singlestat\",\n              \"name\": \"Singlestat\",\n              \"version\": \"\"\n            }\n          ],\n          \"annotations\": {\n            \"list\": [\n              {\n                \"builtIn\": 1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n                \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              },\n              {\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"enable\": true,\n                \"expr\": \"sum(changes(nginx_ingress_controller_config_last_reload_successful_timestamp_seconds{instance!=\\\"unknown\\\",controller_class=~\\\"$controller_class\\\",namespace=~\\\"$namespace\\\"}[30s])) by 
(controller_class)\",\n                \"hide\": false,\n                \"iconColor\": \"rgba(255, 96, 96, 1)\",\n                \"limit\": 100,\n                \"name\": \"Config Reloads\",\n                \"showIn\": 0,\n                \"step\": \"30s\",\n                \"tagKeys\": \"controller_class\",\n                \"tags\": [],\n                \"titleFormat\": \"Config Reloaded\",\n                \"type\": \"tags\"\n              }\n            ]\n          },\n          \"editable\": true,\n          \"overwrite\": true,\n          \"gnetId\": null,\n          \"graphTooltip\": 0,\n          \"links\": [],\n          \"panels\": [\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"format\": \"ops\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 6,\n                \"x\": 0,\n                \"y\": 0\n              },\n              \"id\": 20,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": 
null,\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"round(sum(irate(nginx_ingress_controller_requests{controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",namespace=~\\\"$namespace\\\"}[2m])), 0.001)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 4\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Controller Request Volume\",\n              \"transparent\": false,\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"avg\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"format\": \"none\",\n              \"gauge\": {\n      
          \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 6,\n                \"x\": 6,\n                \"y\": 0\n              },\n              \"id\": 82,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum(avg_over_time(nginx_ingress_controller_nginx_process_connections{controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",controller_namespace=~\\\"$namespace\\\"}[2m]))\",\n                  \"format\": \"time_series\",\n                  \"instant\": false,\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  
\"step\": 4\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Controller Connections\",\n              \"transparent\": false,\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"avg\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"format\": \"percentunit\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 80,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": false\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 6,\n                \"x\": 12,\n                \"y\": 0\n              },\n              \"id\": 21,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              
\"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(nginx_ingress_controller_requests{controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",namespace=~\\\"$namespace\\\",status!~\\\"[4-5].*\\\"}[2m])) / sum(rate(nginx_ingress_controller_requests{controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",namespace=~\\\"$namespace\\\"}[2m]))\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 4\n                }\n              ],\n              \"thresholds\": \"95, 99, 99.5\",\n              \"title\": \"Controller Success Rate (non-4|5xx responses)\",\n              \"transparent\": false,\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"avg\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n 
             \"decimals\": 0,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 3,\n                \"x\": 18,\n                \"y\": 0\n              },\n              \"id\": 81,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"avg(nginx_ingress_controller_success{controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",controller_namespace=~\\\"$namespace\\\"})\",\n                  \"format\": \"time_series\",\n                  \"instant\": true,\n                  \"intervalFactor\": 
1,\n                  \"refId\": \"A\",\n                  \"step\": 4\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Config Reloads\",\n              \"transparent\": false,\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"avg\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(245, 54, 54, 0.9)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(50, 172, 45, 0.97)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 0,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 3,\n                \"w\": 3,\n                \"x\": 21,\n                \"y\": 0\n              },\n              \"id\": 83,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"postfix\": \"\",\n              
\"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": true,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"count(nginx_ingress_controller_config_last_reload_successful{controller_pod=~\\\"$controller\\\",controller_namespace=~\\\"$namespace\\\"} == 0)\",\n                  \"format\": \"time_series\",\n                  \"instant\": true,\n                  \"intervalFactor\": 1,\n                  \"refId\": \"A\",\n                  \"step\": 4\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Last Config Failed\",\n              \"transparent\": false,\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"None\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"avg\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 0,\n                
\"y\": 3\n              },\n              \"height\": \"200px\",\n              \"id\": 86,\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": false,\n                \"hideEmpty\": false,\n                \"hideZero\": true,\n                \"max\": false,\n                \"min\": false,\n                \"rightSide\": true,\n                \"show\": true,\n                \"sideWidth\": 300,\n                \"sort\": \"current\",\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"repeat\": null,\n              \"repeatDirection\": \"h\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"round(sum(irate(nginx_ingress_controller_requests{controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",controller_namespace=~\\\"$namespace\\\",ingress=~\\\"$ingress\\\"}[2m])) by (ingress), 0.001)\",\n                  \"format\": \"time_series\",\n                  \"hide\": false,\n                  \"instant\": false,\n                  \"interval\": \"\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"{{ ingress }}\",\n                  \"metric\": \"network\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              
\"timeShift\": null,\n              \"title\": \"Ingress Request Volume\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"cumulative\"\n              },\n              \"transparent\": false,\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"Bps\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"Bps\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {\n                \"max - istio-proxy\": \"#890f02\",\n                \"max - master\": \"#bf1b00\",\n                \"max - prometheus\": \"#bf1b00\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": false,\n              \"error\": false,\n              \"fill\": 0,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 3\n              },\n              \"id\": 87,\n              \"isNew\": 
true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": false,\n                \"hideEmpty\": true,\n                \"hideZero\": false,\n                \"max\": false,\n                \"min\": false,\n                \"rightSide\": true,\n                \"show\": true,\n                \"sideWidth\": 300,\n                \"sort\": \"avg\",\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(nginx_ingress_controller_requests{controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",namespace=~\\\"$namespace\\\",ingress=~\\\"$ingress\\\",status!~\\\"[4-5].*\\\"}[2m])) by (ingress) / sum(rate(nginx_ingress_controller_requests{controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",namespace=~\\\"$namespace\\\",ingress=~\\\"$ingress\\\"}[2m])) by (ingress)\",\n                  \"format\": \"time_series\",\n                  \"instant\": false,\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"{{ ingress }}\",\n                  \"metric\": \"container_memory_usage:sort_desc\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              
\"timeShift\": null,\n              \"title\": \"Ingress Success Rate (non-4|5xx responses)\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 1,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"percentunit\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 6,\n                \"w\": 8,\n                \"x\": 0,\n                \"y\": 10\n              },\n              \"height\": \"200px\",\n              \"id\": 32,\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": false,\n                \"avg\": true,\n                \"current\": 
true,\n                \"max\": false,\n                \"min\": false,\n                \"rightSide\": false,\n                \"show\": false,\n                \"sideWidth\": 200,\n                \"sort\": \"current\",\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum (irate (nginx_ingress_controller_request_size_sum{controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",controller_namespace=~\\\"$namespace\\\"}[2m]))\",\n                  \"format\": \"time_series\",\n                  \"instant\": false,\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Received\",\n                  \"metric\": \"network\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                },\n                {\n                  \"expr\": \"- sum (irate (nginx_ingress_controller_response_size_sum{controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",controller_namespace=~\\\"$namespace\\\"}[2m]))\",\n                  \"format\": \"time_series\",\n                  \"hide\": false,\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"Sent\",\n                  \"metric\": \"network\",\n                  \"refId\": \"B\",\n                  \"step\": 10\n                }\n          
    ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Network I/O pressure\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"transparent\": false,\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"Bps\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"Bps\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {\n                \"max - istio-proxy\": \"#890f02\",\n                \"max - master\": \"#bf1b00\",\n                \"max - prometheus\": \"#bf1b00\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": false,\n              \"error\": false,\n              \"fill\": 0,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 6,\n                \"w\": 8,\n                \"x\": 8,\n             
   \"y\": 10\n              },\n              \"id\": 77,\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": false,\n                \"min\": false,\n                \"rightSide\": false,\n                \"show\": false,\n                \"sideWidth\": 200,\n                \"sort\": \"current\",\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"avg(nginx_ingress_controller_nginx_process_resident_memory_bytes{controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",controller_namespace=~\\\"$namespace\\\"}) \",\n                  \"format\": \"time_series\",\n                  \"instant\": false,\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"nginx\",\n                  \"metric\": \"container_memory_usage:sort_desc\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Average Memory Usage\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": 
\"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {\n                \"max - istio-proxy\": \"#890f02\",\n                \"max - master\": \"#bf1b00\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 3,\n              \"editable\": false,\n              \"error\": false,\n              \"fill\": 0,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 6,\n                \"w\": 8,\n                \"x\": 16,\n                \"y\": 10\n              },\n              \"height\": \"\",\n              \"id\": 79,\n              \"isNew\": true,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": false,\n                \"min\": false,\n                \"rightSide\": false,\n                \"show\": false,\n                
\"sort\": null,\n                \"sortDesc\": null,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum (rate (nginx_ingress_controller_nginx_process_cpu_seconds_total{controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",controller_namespace=~\\\"$namespace\\\"}[2m])) \",\n                  \"format\": \"time_series\",\n                  \"interval\": \"10s\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"nginx\",\n                  \"metric\": \"container_cpu\",\n                  \"refId\": \"A\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [\n                {\n                  \"colorMode\": \"critical\",\n                  \"fill\": true,\n                  \"line\": true,\n                  \"op\": \"gt\"\n                }\n              ],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Average CPU Usage\",\n              \"tooltip\": {\n                \"msResolution\": true,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"cumulative\"\n              },\n              \"transparent\": false,\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": 
true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"none\",\n                  \"label\": \"cores\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"columns\": [],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fontSize\": \"100%\",\n              \"gridPos\": {\n                \"h\": 8,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 16\n              },\n              \"hideTimeOverride\": false,\n              \"id\": 75,\n              \"links\": [],\n              \"pageSize\": 7,\n              \"repeat\": null,\n              \"repeatDirection\": \"h\",\n              \"scroll\": true,\n              \"showHeader\": true,\n              \"sort\": {\n                \"col\": 1,\n                \"desc\": true\n              },\n              \"styles\": [\n                {\n                  \"alias\": \"Ingress\",\n                  \"colorMode\": null,\n                  \"colors\": [\n                    \"rgba(245, 54, 54, 0.9)\",\n                    \"rgba(237, 129, 40, 0.89)\",\n                    \"rgba(50, 172, 45, 0.97)\"\n                  ],\n                  \"dateFormat\": \"YYYY-MM-DD HH:mm:ss\",\n                  \"decimals\": 2,\n                  \"pattern\": \"ingress\",\n                  \"preserveFormat\": false,\n                  \"sanitize\": false,\n         
         \"thresholds\": [],\n                  \"type\": \"string\",\n                  \"unit\": \"short\"\n                },\n                {\n                  \"alias\": \"Requests\",\n                  \"colorMode\": null,\n                  \"colors\": [\n                    \"rgba(245, 54, 54, 0.9)\",\n                    \"rgba(237, 129, 40, 0.89)\",\n                    \"rgba(50, 172, 45, 0.97)\"\n                  ],\n                  \"dateFormat\": \"YYYY-MM-DD HH:mm:ss\",\n                  \"decimals\": 2,\n                  \"pattern\": \"Value #A\",\n                  \"thresholds\": [\n                    \"\"\n                  ],\n                  \"type\": \"number\",\n                  \"unit\": \"ops\"\n                },\n                {\n                  \"alias\": \"Errors\",\n                  \"colorMode\": null,\n                  \"colors\": [\n                    \"rgba(245, 54, 54, 0.9)\",\n                    \"rgba(237, 129, 40, 0.89)\",\n                    \"rgba(50, 172, 45, 0.97)\"\n                  ],\n                  \"dateFormat\": \"YYYY-MM-DD HH:mm:ss\",\n                  \"decimals\": 2,\n                  \"pattern\": \"Value #B\",\n                  \"thresholds\": [],\n                  \"type\": \"number\",\n                  \"unit\": \"ops\"\n                },\n                {\n                  \"alias\": \"P50 Latency\",\n                  \"colorMode\": null,\n                  \"colors\": [\n                    \"rgba(245, 54, 54, 0.9)\",\n                    \"rgba(237, 129, 40, 0.89)\",\n                    \"rgba(50, 172, 45, 0.97)\"\n                  ],\n                  \"dateFormat\": \"YYYY-MM-DD HH:mm:ss\",\n                  \"decimals\": 0,\n                  \"link\": false,\n                  \"pattern\": \"Value #C\",\n                  \"thresholds\": [],\n                  \"type\": \"number\",\n                  \"unit\": \"dtdurations\"\n                },\n                {\n  
                \"alias\": \"P90 Latency\",\n                  \"colorMode\": null,\n                  \"colors\": [\n                    \"rgba(245, 54, 54, 0.9)\",\n                    \"rgba(237, 129, 40, 0.89)\",\n                    \"rgba(50, 172, 45, 0.97)\"\n                  ],\n                  \"dateFormat\": \"YYYY-MM-DD HH:mm:ss\",\n                  \"decimals\": 0,\n                  \"pattern\": \"Value #D\",\n                  \"thresholds\": [],\n                  \"type\": \"number\",\n                  \"unit\": \"dtdurations\"\n                },\n                {\n                  \"alias\": \"P99 Latency\",\n                  \"colorMode\": null,\n                  \"colors\": [\n                    \"rgba(245, 54, 54, 0.9)\",\n                    \"rgba(237, 129, 40, 0.89)\",\n                    \"rgba(50, 172, 45, 0.97)\"\n                  ],\n                  \"dateFormat\": \"YYYY-MM-DD HH:mm:ss\",\n                  \"decimals\": 0,\n                  \"pattern\": \"Value #E\",\n                  \"thresholds\": [],\n                  \"type\": \"number\",\n                  \"unit\": \"dtdurations\"\n                },\n                {\n                  \"alias\": \"IN\",\n                  \"colorMode\": null,\n                  \"colors\": [\n                    \"rgba(245, 54, 54, 0.9)\",\n                    \"rgba(237, 129, 40, 0.89)\",\n                    \"rgba(50, 172, 45, 0.97)\"\n                  ],\n                  \"dateFormat\": \"YYYY-MM-DD HH:mm:ss\",\n                  \"decimals\": 2,\n                  \"pattern\": \"Value #F\",\n                  \"thresholds\": [\n                    \"\"\n                  ],\n                  \"type\": \"number\",\n                  \"unit\": \"Bps\"\n                },\n                {\n                  \"alias\": \"\",\n                  \"colorMode\": null,\n                  \"colors\": [\n                    \"rgba(245, 54, 54, 0.9)\",\n                    
\"rgba(237, 129, 40, 0.89)\",\n                    \"rgba(50, 172, 45, 0.97)\"\n                  ],\n                  \"dateFormat\": \"YYYY-MM-DD HH:mm:ss\",\n                  \"decimals\": 2,\n                  \"pattern\": \"Time\",\n                  \"thresholds\": [],\n                  \"type\": \"hidden\",\n                  \"unit\": \"short\"\n                },\n                {\n                  \"alias\": \"OUT\",\n                  \"colorMode\": null,\n                  \"colors\": [\n                    \"rgba(245, 54, 54, 0.9)\",\n                    \"rgba(237, 129, 40, 0.89)\",\n                    \"rgba(50, 172, 45, 0.97)\"\n                  ],\n                  \"dateFormat\": \"YYYY-MM-DD HH:mm:ss\",\n                  \"decimals\": 2,\n                  \"mappingType\": 1,\n                  \"pattern\": \"Value #G\",\n                  \"thresholds\": [],\n                  \"type\": \"number\",\n                  \"unit\": \"Bps\"\n                }\n              ],\n              \"targets\": [\n                {\n                  \"expr\": \"histogram_quantile(0.50, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\\\"\\\",controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",controller_namespace=~\\\"$namespace\\\",ingress=~\\\"$ingress\\\"}[2m])) by (le, ingress))\",\n                  \"format\": \"table\",\n                  \"hide\": false,\n                  \"instant\": true,\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"{{ ingress }}\",\n                  \"refId\": \"C\"\n                },\n                {\n                  \"expr\": \"histogram_quantile(0.90, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\\\"\\\",controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",controller_namespace=~\\\"$namespace\\\",ingress=~\\\"$ingress\\\"}[2m])) by (le, ingress))\",\n                
  \"format\": \"table\",\n                  \"hide\": false,\n                  \"instant\": true,\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"{{ ingress }}\",\n                  \"refId\": \"D\"\n                },\n                {\n                  \"expr\": \"histogram_quantile(0.99, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\\\"\\\",controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",controller_namespace=~\\\"$namespace\\\",ingress=~\\\"$ingress\\\"}[2m])) by (le, ingress))\",\n                  \"format\": \"table\",\n                  \"hide\": false,\n                  \"instant\": true,\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"{{ destination_service }}\",\n                  \"refId\": \"E\"\n                },\n                {\n                  \"expr\": \"sum(irate(nginx_ingress_controller_request_size_sum{ingress!=\\\"\\\",controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",controller_namespace=~\\\"$namespace\\\",ingress=~\\\"$ingress\\\"}[2m])) by (ingress)\",\n                  \"format\": \"table\",\n                  \"hide\": false,\n                  \"instant\": true,\n                  \"interval\": \"\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"{{ ingress }}\",\n                  \"refId\": \"F\"\n                },\n                {\n                  \"expr\": \"sum(irate(nginx_ingress_controller_response_size_sum{ingress!=\\\"\\\",controller_pod=~\\\"$controller\\\",controller_class=~\\\"$controller_class\\\",controller_namespace=~\\\"$namespace\\\",ingress=~\\\"$ingress\\\"}[2m])) by (ingress)\",\n                  \"format\": \"table\",\n                  \"instant\": true,\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"{{ ingress }}\",\n                  \"refId\": \"G\"\n                }\n       
       ],\n              \"timeFrom\": null,\n              \"title\": \"Ingress Percentile Response Times and Transfer Rates\",\n              \"transform\": \"table\",\n              \"transparent\": false,\n              \"type\": \"table\"\n            },\n            {\n              \"columns\": [\n                {\n                  \"text\": \"Current\",\n                  \"value\": \"current\"\n                }\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fontSize\": \"100%\",\n              \"gridPos\": {\n                \"h\": 8,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 24\n              },\n              \"height\": \"1024\",\n              \"id\": 85,\n              \"links\": [],\n              \"pageSize\": 7,\n              \"scroll\": true,\n              \"showHeader\": true,\n              \"sort\": {\n                \"col\": 1,\n                \"desc\": false\n              },\n              \"styles\": [\n                {\n                  \"alias\": \"Time\",\n                  \"dateFormat\": \"YYYY-MM-DD HH:mm:ss\",\n                  \"pattern\": \"Time\",\n                  \"type\": \"date\"\n                },\n                {\n                  \"alias\": \"TTL\",\n                  \"colorMode\": \"cell\",\n                  \"colors\": [\n                    \"rgba(245, 54, 54, 0.9)\",\n                    \"rgba(237, 129, 40, 0.89)\",\n                    \"rgba(50, 172, 45, 0.97)\"\n                  ],\n                  \"dateFormat\": \"YYYY-MM-DD HH:mm:ss\",\n                  \"decimals\": 0,\n                  \"pattern\": \"Current\",\n                  \"thresholds\": [\n                    \"0\",\n                    \"691200\"\n                  ],\n                  \"type\": \"number\",\n                  \"unit\": \"s\"\n                },\n                {\n                  \"alias\": \"\",\n                  \"colorMode\": 
null,\n                  \"colors\": [\n                    \"rgba(245, 54, 54, 0.9)\",\n                    \"rgba(237, 129, 40, 0.89)\",\n                    \"rgba(50, 172, 45, 0.97)\"\n                  ],\n                  \"decimals\": 2,\n                  \"pattern\": \"/.*/\",\n                  \"thresholds\": [],\n                  \"type\": \"number\",\n                  \"unit\": \"short\"\n                }\n              ],\n              \"targets\": [\n                {\n                  \"expr\": \"avg(nginx_ingress_controller_ssl_expire_time_seconds{kubernetes_pod_name=~\\\"$controller\\\",namespace=~\\\"$namespace\\\",ingress=~\\\"$ingress\\\"}) by (host) - time()\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"{{ host }}\",\n                  \"metric\": \"gke_letsencrypt_cert_expiration\",\n                  \"refId\": \"A\",\n                  \"step\": 1\n                }\n              ],\n              \"title\": \"Ingress Certificate Expiry\",\n              \"transform\": \"timeseries_aggregations\",\n              \"type\": \"table\"\n            }\n          ],\n          \"refresh\": \"5s\",\n          \"schemaVersion\": 16,\n          \"style\": \"dark\",\n          \"tags\": [\n            \"nginx\"\n          ],\n          \"templating\": {\n            \"list\": [\n              {\n                \"allValue\": \".*\",\n                \"current\": {\n                  \"text\": \"All\",\n                  \"value\": \"$__all\"\n                },\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"hide\": 0,\n                \"includeAll\": true,\n                \"label\": \"Namespace\",\n                \"multi\": false,\n                \"name\": \"namespace\",\n                \"options\": [],\n                \"query\": \"label_values(nginx_ingress_controller_config_hash, controller_namespace)\",\n                
\"refresh\": 1,\n                \"regex\": \"\",\n                \"sort\": 0,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              },\n              {\n                \"allValue\": \".*\",\n                \"current\": {\n                  \"text\": \"All\",\n                  \"value\": \"$__all\"\n                },\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"hide\": 0,\n                \"includeAll\": true,\n                \"label\": \"Controller Class\",\n                \"multi\": false,\n                \"name\": \"controller_class\",\n                \"options\": [],\n                \"query\": \"label_values(nginx_ingress_controller_config_hash{namespace=~\\\"$namespace\\\"}, controller_class) \",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"sort\": 0,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              },\n              {\n                \"allValue\": \".*\",\n                \"current\": {\n                  \"text\": \"All\",\n                  \"value\": \"$__all\"\n                },\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"hide\": 0,\n                \"includeAll\": true,\n                \"label\": \"Controller\",\n                \"multi\": false,\n                \"name\": \"controller\",\n                \"options\": [],\n                \"query\": \"label_values(nginx_ingress_controller_config_hash{namespace=~\\\"$namespace\\\",controller_class=~\\\"$controller_class\\\"}, controller_pod) \",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"sort\": 0,\n                \"tagValuesQuery\": \"\",\n                
\"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              },\n              {\n                \"allValue\": \".*\",\n                \"current\": {\n                  \"tags\": [],\n                  \"text\": \"All\",\n                  \"value\": \"$__all\"\n                },\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"hide\": 0,\n                \"includeAll\": true,\n                \"label\": \"Ingress\",\n                \"multi\": false,\n                \"name\": \"ingress\",\n                \"options\": [],\n                \"query\": \"label_values(nginx_ingress_controller_requests{namespace=~\\\"$namespace\\\",controller_class=~\\\"$controller_class\\\",controller=~\\\"$controller\\\"}, ingress) \",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"sort\": 2,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": \"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n            \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"2m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ]\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"NGINX Ingress controller\",\n          \"uid\": \"nginx\",\n          \"version\": 
1\n        }\n...\n"
  },
  {
    "path": "values_overrides/grafana/nodes.yaml",
    "content": "# NOTE(srwilkers): This overrides file provides a reference for a dashboard for\n# the status of all nodes in a deployment\n---\nconf:\n  dashboards:\n    lma:\n      nodes: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"prometheus\",\n              \"description\": \"\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"4.4.1\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.0.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"singlestat\",\n              \"name\": \"Singlestat\",\n              \"version\": \"\"\n            }\n          ],\n          \"annotations\": {\n            \"list\": [\n              {\n                \"builtIn\": 1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n                \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              }\n            ]\n          },\n          \"description\": \"Dashboard to get an overview of one server\",\n          \"overwrite\": true,\n          \"editable\": true,\n          \"gnetId\": 22,\n          \"graphTooltip\": 0,\n          \"id\": 8,\n          \"links\": [],\n          \"panels\": [\n           
 {\n              \"alerting\": {},\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 0\n              },\n              \"id\": 3,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"100 - (avg by (cpu) (irate(node_cpu{mode=\\\"idle\\\", instance=\\\"$server\\\"}[5m])) * 100)\",\n                  \"hide\": false,\n                  \"intervalFactor\": 10,\n                  \"legendFormat\": \"{{cpu}}\",\n                  \"refId\": \"A\",\n                  \"step\": 50\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Idle cpu\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n         
       \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"percent\",\n                  \"label\": \"cpu usage\",\n                  \"logBase\": 1,\n                  \"max\": 100,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"alerting\": {},\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 0\n              },\n              \"id\": 9,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": 
\"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"node_load1{instance=\\\"$server\\\"}\",\n                  \"intervalFactor\": 4,\n                  \"legendFormat\": \"load 1m\",\n                  \"refId\": \"A\",\n                  \"step\": 20,\n                  \"target\": \"\"\n                },\n                {\n                  \"expr\": \"node_load5{instance=\\\"$server\\\"}\",\n                  \"intervalFactor\": 4,\n                  \"legendFormat\": \"load 5m\",\n                  \"refId\": \"B\",\n                  \"step\": 20,\n                  \"target\": \"\"\n                },\n                {\n                  \"expr\": \"node_load15{instance=\\\"$server\\\"}\",\n                  \"intervalFactor\": 4,\n                  \"legendFormat\": \"load 15m\",\n                  \"refId\": \"C\",\n                  \"step\": 20,\n                  \"target\": \"\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"System load\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                
{\n                  \"format\": \"percentunit\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"alerting\": {},\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 18,\n                \"x\": 0,\n                \"y\": 7\n              },\n              \"id\": 4,\n              \"legend\": {\n                \"alignAsTable\": false,\n                \"avg\": false,\n                \"current\": false,\n                \"hideEmpty\": false,\n                \"hideZero\": false,\n                \"max\": false,\n                \"min\": false,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n  
              {\n                  \"alias\": \"node_memory_SwapFree{instance=\\\"$server\\\",job=\\\"prometheus\\\"}\",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"node_memory_MemTotal{instance=\\\"$server\\\"} - node_memory_MemFree{instance=\\\"$server\\\"} - node_memory_Buffers{instance=\\\"$server\\\"} - node_memory_Cached{instance=\\\"$server\\\"}\",\n                  \"hide\": false,\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"memory used\",\n                  \"metric\": \"\",\n                  \"refId\": \"C\",\n                  \"step\": 10\n                },\n                {\n                  \"expr\": \"node_memory_Buffers{instance=\\\"$server\\\"}\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"memory buffers\",\n                  \"metric\": \"\",\n                  \"refId\": \"E\",\n                  \"step\": 10\n                },\n                {\n                  \"expr\": \"node_memory_Cached{instance=\\\"$server\\\"}\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"memory cached\",\n                  \"metric\": \"\",\n                  \"refId\": \"F\",\n                  \"step\": 10\n                },\n                {\n                  \"expr\": \"node_memory_MemFree{instance=\\\"$server\\\"}\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"memory free\",\n                  \"metric\": \"\",\n                  \"refId\": \"D\",\n                  \"step\": 10\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n   
           \"timeShift\": null,\n              \"title\": \"Memory usage\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"percent\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n    
            \"w\": 6,\n                \"x\": 18,\n                \"y\": 7\n              },\n              \"id\": 5,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"((node_memory_MemTotal{instance=\\\"$server\\\"} - node_memory_MemFree{instance=\\\"$server\\\"}  - node_memory_Buffers{instance=\\\"$server\\\"} - node_memory_Cached{instance=\\\"$server\\\"}) / node_memory_MemTotal{instance=\\\"$server\\\"}) * 100\",\n                  \"intervalFactor\": 2,\n                  \"refId\": \"A\",\n                  \"step\": 60,\n                  \"target\": \"\"\n                }\n              ],\n              \"thresholds\": \"80, 90\",\n              \"title\": \"Memory usage\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              
\"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"avg\"\n            },\n            {\n              \"alerting\": {},\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 18,\n                \"x\": 0,\n                \"y\": 14\n              },\n              \"id\": 6,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"read\",\n                  \"yaxis\": 1\n                },\n                {\n                  \"alias\": \"{instance=\\\"$server\\\"}\",\n                  \"yaxis\": 2\n                },\n                {\n                  \"alias\": \"io time\",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum by 
(instance) (rate(node_disk_bytes_read{instance=\\\"$server\\\"}[2m]))\",\n                  \"hide\": false,\n                  \"intervalFactor\": 4,\n                  \"legendFormat\": \"read\",\n                  \"refId\": \"A\",\n                  \"step\": 20,\n                  \"target\": \"\"\n                },\n                {\n                  \"expr\": \"sum by (instance) (rate(node_disk_bytes_written{instance=\\\"$server\\\"}[2m]))\",\n                  \"intervalFactor\": 4,\n                  \"legendFormat\": \"written\",\n                  \"refId\": \"B\",\n                  \"step\": 20\n                },\n                {\n                  \"expr\": \"sum by (instance) (rate(node_disk_io_time_ms{instance=\\\"$server\\\"}[2m]))\",\n                  \"intervalFactor\": 4,\n                  \"legendFormat\": \"io time\",\n                  \"refId\": \"C\",\n                  \"step\": 20\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Disk I/O\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"ms\",\n                  \"label\": null,\n                  
\"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(50, 172, 45, 0.97)\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"rgba(245, 54, 54, 0.9)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"percentunit\",\n              \"gauge\": {\n                \"maxValue\": 1,\n                \"minValue\": 0,\n                \"show\": true,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 18,\n                \"y\": 14\n              },\n              \"id\": 7,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                
  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"(sum(node_filesystem_size{device!=\\\"rootfs\\\",instance=\\\"$server\\\"}) - sum(node_filesystem_free{device!=\\\"rootfs\\\",instance=\\\"$server\\\"})) / sum(node_filesystem_size{device!=\\\"rootfs\\\",instance=\\\"$server\\\"})\",\n                  \"intervalFactor\": 2,\n                  \"refId\": \"A\",\n                  \"step\": 60,\n                  \"target\": \"\"\n                }\n              ],\n              \"thresholds\": \"0.75, 0.9\",\n              \"title\": \"Disk space usage\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"alerting\": {},\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 21\n              },\n              \"id\": 8,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                
\"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"transmitted \",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"rate(node_network_receive_bytes{instance=\\\"$server\\\",device!~\\\"lo\\\"}[5m])\",\n                  \"hide\": false,\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{device}}\",\n                  \"refId\": \"A\",\n                  \"step\": 10,\n                  \"target\": \"\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Network received\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n             
     \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"alerting\": {},\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 21\n              },\n              \"id\": 10,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"transmitted \",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                
{\n                  \"expr\": \"rate(node_network_transmit_bytes{instance=\\\"$server\\\",device!~\\\"lo\\\"}[5m])\",\n                  \"hide\": false,\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{device}}\",\n                  \"refId\": \"B\",\n                  \"step\": 10,\n                  \"target\": \"\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Network transmitted\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            }\n          ],\n          \"refresh\": false,\n          \"schemaVersion\": 18,\n          \"style\": \"dark\",\n          \"tags\": [],\n          \"templating\": {\n            \"list\": [\n              {\n                \"current\": {\n                  \"text\": 
\"prometheus\",\n                  \"value\": \"prometheus\"\n                },\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Prometheus datasource\",\n                \"multi\": false,\n                \"name\": \"DS_PROMETHEUS\",\n                \"options\": [],\n                \"query\": \"prometheus\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"type\": \"datasource\"\n              },\n              {\n                \"allValue\": null,\n                \"current\": {},\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"definition\": \"\",\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Server\",\n                \"multi\": false,\n                \"name\": \"host\",\n                \"options\": [],\n                \"query\": \"label_values(node_uname_info, nodename)\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"sort\": 0,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              },\n              {\n                \"allValue\": null,\n                \"current\": {},\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"definition\": \"\",\n                \"hide\": 2,\n                \"includeAll\": false,\n                \"label\": \"Instance\",\n                \"multi\": false,\n                \"name\": \"server\",\n                \"options\": [],\n                \"query\": \"label_values(node_uname_info{nodename=\\\"$host\\\"}, instance)\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"sort\": 0,\n                
\"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": \"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n            \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ]\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"Nodes\",\n          \"version\": 1\n        }\n...\n"
  },
  {
    "path": "values_overrides/grafana/openstack.yaml",
    "content": "# NOTE(srwilkers): This overrides file provides a reference for dashboards for\n# the openstack control plane as a whole, the individual openstack services, and\n# rabbitmq\n---\nconf:\n  dashboards:\n    openstack:\n      rabbitmq: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"Prometheus\",\n              \"description\": \"\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"4.2.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.0.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"singlestat\",\n              \"name\": \"Singlestat\",\n              \"version\": \"\"\n            }\n          ],\n          \"annotations\": {\n            \"list\": []\n          },\n          \"editable\": true,\n          \"overwrite\": true,\n          \"gnetId\": 2121,\n          \"graphTooltip\": 0,\n          \"hideControls\": false,\n          \"id\": null,\n          \"links\": [],\n          \"refresh\": \"5s\",\n          \"rows\": [\n            {\n              \"collapse\": false,\n              \"height\": 266,\n              \"panels\": [\n                {\n                  \"cacheTimeout\": null,\n                  \"colorBackground\": true,\n                  \"colorValue\": false,\n                  \"colors\": [\n               
     \"rgba(50, 172, 45, 0.97)\",\n                    \"rgba(237, 129, 40, 0.89)\",\n                    \"rgba(245, 54, 54, 0.9)\"\n                  ],\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"format\": \"none\",\n                  \"gauge\": {\n                    \"maxValue\": 100,\n                    \"minValue\": 0,\n                    \"show\": false,\n                    \"thresholdLabels\": false,\n                    \"thresholdMarkers\": true\n                  },\n                  \"id\": 13,\n                  \"interval\": null,\n                  \"links\": [],\n                  \"mappingType\": 1,\n                  \"mappingTypes\": [\n                    {\n                      \"name\": \"value to text\",\n                      \"value\": 1\n                    },\n                    {\n                      \"name\": \"range to text\",\n                      \"value\": 2\n                    }\n                  ],\n                  \"maxDataPoints\": 100,\n                  \"nullPointMode\": \"connected\",\n                  \"nullText\": null,\n                  \"postfix\": \"\",\n                  \"postfixFontSize\": \"50%\",\n                  \"prefix\": \"\",\n                  \"prefixFontSize\": \"50%\",\n                  \"rangeMaps\": [\n                    {\n                      \"from\": \"null\",\n                      \"text\": \"N/A\",\n                      \"to\": \"null\"\n                    }\n                  ],\n                  \"span\": 3,\n                  \"sparkline\": {\n                    \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                    \"full\": false,\n                    \"lineColor\": \"rgb(31, 120, 193)\",\n                    \"show\": false\n                  },\n                  \"targets\": [\n                    {\n                      \"expr\": \"rabbitmq_up\",\n                      \"intervalFactor\": 2,\n                      
\"metric\": \"rabbitmq_up\",\n                      \"refId\": \"A\",\n                      \"step\": 2\n                    }\n                  ],\n                  \"thresholds\": \"Up,Down\",\n                  \"timeFrom\": \"30s\",\n                  \"title\": \"RabbitMQ Server\",\n                  \"type\": \"singlestat\",\n                  \"valueFontSize\": \"80%\",\n                  \"valueMaps\": [\n                    {\n                      \"op\": \"=\",\n                      \"text\": \"N/A\",\n                      \"value\": \"null\"\n                    },\n                    {\n                      \"op\": \"=\",\n                      \"text\": \"Down\",\n                      \"value\": \"0\"\n                    },\n                    {\n                      \"op\": \"=\",\n                      \"text\": \"Up\",\n                      \"value\": \"1\"\n                    }\n                  ],\n                  \"valueName\": \"current\"\n                },\n                {\n                  \"alert\": {\n                    \"conditions\": [\n                      {\n                        \"evaluator\": {\n                          \"params\": [\n                            1\n                          ],\n                          \"type\": \"lt\"\n                        },\n                        \"operator\": {\n                          \"type\": \"and\"\n                        },\n                        \"query\": {\n                          \"params\": [\n                            \"A\",\n                            \"10s\",\n                            \"now\"\n                          ]\n                        },\n                        \"reducer\": {\n                          \"params\": [],\n                          \"type\": \"last\"\n                        },\n                        \"type\": \"query\"\n                      },\n                      {\n                        \"evaluator\": {\n  
                        \"params\": [],\n                          \"type\": \"no_value\"\n                        },\n                        \"operator\": {\n                          \"type\": \"and\"\n                        },\n                        \"query\": {\n                          \"params\": [\n                            \"A\",\n                            \"10s\",\n                            \"now\"\n                          ]\n                        },\n                        \"reducer\": {\n                          \"params\": [],\n                          \"type\": \"last\"\n                        },\n                        \"type\": \"query\"\n                      }\n                    ],\n                    \"executionErrorState\": \"alerting\",\n                    \"frequency\": \"60s\",\n                    \"handler\": 1,\n                    \"message\": \"Some of the RabbitMQ node is down\",\n                    \"name\": \"Node Stats alert\",\n                    \"noDataState\": \"no_data\",\n                    \"notifications\": []\n                  },\n                  \"aliasColors\": {},\n                  \"bars\": true,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"decimals\": 0,\n                  \"fill\": 1,\n                  \"id\": 12,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                    \"avg\": false,\n                    \"current\": true,\n                    \"max\": false,\n                    \"min\": false,\n                    \"show\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": false,\n                  \"linewidth\": 1,\n                  \"links\": [],\n                  \"nullPointMode\": \"null\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n              
    \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"span\": 9,\n                  \"stack\": false,\n                  \"steppedLine\": false,\n                  \"targets\": [\n                    {\n                      \"expr\": \"rabbitmq_running\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{node}}\",\n                      \"metric\": \"rabbitmq_running\",\n                      \"refId\": \"A\",\n                      \"step\": 2\n                    }\n                  ],\n                  \"thresholds\": [\n                    {\n                      \"colorMode\": \"critical\",\n                      \"fill\": true,\n                      \"line\": true,\n                      \"op\": \"lt\",\n                      \"value\": 1\n                    }\n                  ],\n                  \"timeFrom\": \"30s\",\n                  \"timeShift\": null,\n                  \"title\": \"Node up Stats\",\n                  \"tooltip\": {\n                    \"shared\": true,\n                    \"sort\": 0,\n                    \"value_type\": \"individual\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"mode\": \"time\",\n                    \"name\": null,\n                    \"show\": true,\n                    \"values\": []\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n               
       \"show\": true\n                    }\n                  ]\n                },\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"decimals\": 0,\n                  \"fill\": 1,\n                  \"id\": 6,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                    \"avg\": true,\n                    \"current\": true,\n                    \"max\": true,\n                    \"min\": true,\n                    \"show\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 1,\n                  \"links\": [],\n                  \"nullPointMode\": \"null\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"span\": 4,\n                  \"stack\": false,\n                  \"steppedLine\": false,\n                  \"targets\": [\n                    {\n                      \"expr\": \"rabbitmq_exchangesTotal\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{instance}}:exchanges\",\n                      \"metric\": \"rabbitmq_exchangesTotal\",\n                      \"refId\": \"A\",\n                      \"step\": 2\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"Exchanges\",\n                  \"tooltip\": {\n                    \"shared\": true,\n                    \"sort\": 0,\n                    \"value_type\": \"individual\"\n                  },\n                  \"type\": \"graph\",\n                  
\"xaxis\": {\n                    \"mode\": \"time\",\n                    \"name\": null,\n                    \"show\": true,\n                    \"values\": []\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    }\n                  ]\n                },\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"decimals\": 0,\n                  \"fill\": 1,\n                  \"id\": 4,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                    \"avg\": true,\n                    \"current\": true,\n                    \"max\": true,\n                    \"min\": true,\n                    \"show\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 1,\n                  \"links\": [],\n                  \"nullPointMode\": \"null\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"span\": 4,\n                  \"stack\": false,\n                  \"steppedLine\": false,\n                  \"targets\": [\n                    {\n                      
\"expr\": \"rabbitmq_channelsTotal\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{instance}}:channels\",\n                      \"metric\": \"rabbitmq_channelsTotal\",\n                      \"refId\": \"A\",\n                      \"step\": 2\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"Channels\",\n                  \"tooltip\": {\n                    \"shared\": true,\n                    \"sort\": 0,\n                    \"value_type\": \"individual\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"mode\": \"time\",\n                    \"name\": null,\n                    \"show\": true,\n                    \"values\": []\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    }\n                  ]\n                },\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"decimals\": 0,\n                  \"fill\": 1,\n                  \"id\": 3,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                    \"avg\": true,\n                    \"current\": true,\n                    
\"max\": true,\n                    \"min\": true,\n                    \"show\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 1,\n                  \"links\": [],\n                  \"nullPointMode\": \"null\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"span\": 4,\n                  \"stack\": false,\n                  \"steppedLine\": false,\n                  \"targets\": [\n                    {\n                      \"expr\": \"rabbitmq_consumersTotal\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{instance}}:consumers\",\n                      \"metric\": \"rabbitmq_consumersTotal\",\n                      \"refId\": \"A\",\n                      \"step\": 2\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"Consumers\",\n                  \"tooltip\": {\n                    \"shared\": true,\n                    \"sort\": 0,\n                    \"value_type\": \"individual\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"mode\": \"time\",\n                    \"name\": null,\n                    \"show\": true,\n                    \"values\": []\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n             
       {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    }\n                  ]\n                },\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"decimals\": 0,\n                  \"fill\": 1,\n                  \"id\": 5,\n                  \"legend\": {\n                    \"avg\": true,\n                    \"current\": true,\n                    \"max\": true,\n                    \"min\": true,\n                    \"show\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 1,\n                  \"links\": [],\n                  \"nullPointMode\": \"null\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"span\": 4,\n                  \"stack\": false,\n                  \"steppedLine\": false,\n                  \"targets\": [\n                    {\n                      \"expr\": \"rabbitmq_connectionsTotal\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{instance}}:connections\",\n                      \"metric\": \"rabbitmq_connectionsTotal\",\n                      \"refId\": \"A\",\n                      \"step\": 2\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"Connections\",\n                  \"tooltip\": {\n                    
\"shared\": true,\n                    \"sort\": 0,\n                    \"value_type\": \"individual\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"mode\": \"time\",\n                    \"name\": null,\n                    \"show\": true,\n                    \"values\": []\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    }\n                  ]\n                },\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"fill\": 1,\n                  \"id\": 7,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                    \"avg\": true,\n                    \"current\": true,\n                    \"max\": true,\n                    \"min\": true,\n                    \"show\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 1,\n                  \"links\": [],\n                  \"nullPointMode\": \"null\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"span\": 4,\n               
   \"stack\": false,\n                  \"steppedLine\": false,\n                  \"targets\": [\n                    {\n                      \"expr\": \"rabbitmq_queuesTotal\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{instance}}:queues\",\n                      \"metric\": \"rabbitmq_queuesTotal\",\n                      \"refId\": \"A\",\n                      \"step\": 2\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"Queues\",\n                  \"tooltip\": {\n                    \"shared\": true,\n                    \"sort\": 0,\n                    \"value_type\": \"individual\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"mode\": \"time\",\n                    \"name\": null,\n                    \"show\": true,\n                    \"values\": []\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    }\n                  ]\n                },\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"decimals\": 0,\n                  \"fill\": 1,\n                  \"id\": 8,\n                  \"legend\": {\n            
        \"alignAsTable\": true,\n                    \"avg\": true,\n                    \"current\": true,\n                    \"max\": true,\n                    \"min\": true,\n                    \"show\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 1,\n                  \"links\": [],\n                  \"nullPointMode\": \"null\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"span\": 6,\n                  \"stack\": false,\n                  \"steppedLine\": false,\n                  \"targets\": [\n                    {\n                      \"expr\": \"sum by (vhost)(rabbitmq_queue_messages_ready)\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{vhost}}:ready\",\n                      \"metric\": \"rabbitmq_queue_messages_ready\",\n                      \"refId\": \"A\",\n                      \"step\": 2\n                    },\n                    {\n                      \"expr\": \"sum by (vhost)(rabbitmq_queue_messages_published_total)\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{vhost}}:published\",\n                      \"metric\": \"rabbitmq_queue_messages_published_total\",\n                      \"refId\": \"B\",\n                      \"step\": 2\n                    },\n                    {\n                      \"expr\": \"sum by (vhost)(rabbitmq_queue_messages_delivered_total)\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{vhost}}:delivered\",\n                      \"metric\": \"rabbitmq_queue_messages_delivered_total\",\n                      \"refId\": \"C\",\n                      \"step\": 
2\n                    },\n                    {\n                      \"expr\": \"sum by (vhost)(rabbitmq_queue_messages_unacknowledged)\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{vhost}}:unack\",\n                      \"metric\": \"ack\",\n                      \"refId\": \"D\",\n                      \"step\": 2\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"Messages/host\",\n                  \"tooltip\": {\n                    \"shared\": true,\n                    \"sort\": 0,\n                    \"value_type\": \"individual\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"mode\": \"time\",\n                    \"name\": null,\n                    \"show\": true,\n                    \"values\": []\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    }\n                  ]\n                },\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"decimals\": 0,\n                  \"fill\": 1,\n                  \"id\": 2,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                   
 \"avg\": false,\n                    \"current\": true,\n                    \"max\": false,\n                    \"min\": false,\n                    \"rightSide\": false,\n                    \"show\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 1,\n                  \"links\": [],\n                  \"nullPointMode\": \"null\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"span\": 6,\n                  \"stack\": false,\n                  \"steppedLine\": false,\n                  \"targets\": [\n                    {\n                      \"expr\": \"rabbitmq_queue_messages\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{queue}}:{{durable}}\",\n                      \"metric\": \"rabbitmq_queue_messages\",\n                      \"refId\": \"A\",\n                      \"step\": 2\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"Messages / Queue\",\n                  \"tooltip\": {\n                    \"shared\": true,\n                    \"sort\": 0,\n                    \"value_type\": \"individual\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"mode\": \"time\",\n                    \"name\": null,\n                    \"show\": true,\n                    \"values\": []\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      
\"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    }\n                  ]\n                },\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"fill\": 1,\n                  \"id\": 9,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                    \"avg\": true,\n                    \"current\": true,\n                    \"max\": true,\n                    \"min\": true,\n                    \"show\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 1,\n                  \"links\": [],\n                  \"nullPointMode\": \"null\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"span\": 6,\n                  \"stack\": false,\n                  \"steppedLine\": false,\n                  \"targets\": [\n                    {\n                      \"expr\": \"rabbitmq_node_mem_used\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{node}}:used\",\n                      \"metric\": \"rabbitmq_node_mem_used\",\n                      \"refId\": \"A\",\n                      \"step\": 2\n                    },\n                    {\n                      \"expr\": \"rabbitmq_node_mem_limit\",\n                      
\"intervalFactor\": 2,\n                      \"legendFormat\": \"{{node}}:limit\",\n                      \"metric\": \"node_mem\",\n                      \"refId\": \"B\",\n                      \"step\": 2\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"Memory\",\n                  \"tooltip\": {\n                    \"shared\": true,\n                    \"sort\": 0,\n                    \"value_type\": \"individual\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"mode\": \"time\",\n                    \"name\": null,\n                    \"show\": true,\n                    \"values\": []\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"decbytes\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    }\n                  ]\n                },\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"fill\": 1,\n                  \"id\": 10,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                    \"avg\": true,\n                    \"current\": true,\n                    \"max\": true,\n                    \"min\": true,\n                    \"show\": true,\n                    \"total\": 
false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 1,\n                  \"links\": [],\n                  \"nullPointMode\": \"null\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"span\": 6,\n                  \"stack\": false,\n                  \"steppedLine\": false,\n                  \"targets\": [\n                    {\n                      \"expr\": \"rabbitmq_fd_used\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{node}}:used\",\n                      \"metric\": \"\",\n                      \"refId\": \"A\",\n                      \"step\": 2\n                    },\n                    {\n                      \"expr\": \"rabbitmq_fd_total\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{node}}:total\",\n                      \"metric\": \"node_mem\",\n                      \"refId\": \"B\",\n                      \"step\": 2\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"File descriptors\",\n                  \"tooltip\": {\n                    \"shared\": true,\n                    \"sort\": 0,\n                    \"value_type\": \"individual\"\n                  },\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"mode\": \"time\",\n                    \"name\": null,\n                    \"show\": true,\n                    \"values\": []\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n       
               \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    }\n                  ]\n                },\n                {\n                  \"aliasColors\": {},\n                  \"bars\": false,\n                  \"datasource\": \"${DS_PROMETHEUS}\",\n                  \"fill\": 1,\n                  \"id\": 11,\n                  \"legend\": {\n                    \"alignAsTable\": true,\n                    \"avg\": true,\n                    \"current\": true,\n                    \"max\": true,\n                    \"min\": true,\n                    \"show\": true,\n                    \"total\": false,\n                    \"values\": true\n                  },\n                  \"lines\": true,\n                  \"linewidth\": 1,\n                  \"links\": [],\n                  \"nullPointMode\": \"null\",\n                  \"percentage\": false,\n                  \"pointradius\": 5,\n                  \"points\": false,\n                  \"renderer\": \"flot\",\n                  \"seriesOverrides\": [],\n                  \"span\": 6,\n                  \"stack\": false,\n                  \"steppedLine\": false,\n                  \"targets\": [\n                    {\n                      \"expr\": \"rabbitmq_sockets_used\",\n                      \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{node}}:used\",\n                      \"metric\": \"\",\n                      \"refId\": \"A\",\n                      \"step\": 2\n                    },\n                    {\n                      \"expr\": \"rabbitmq_sockets_total\",\n      
                \"intervalFactor\": 2,\n                      \"legendFormat\": \"{{node}}:total\",\n                      \"metric\": \"\",\n                      \"refId\": \"B\",\n                      \"step\": 2\n                    }\n                  ],\n                  \"thresholds\": [],\n                  \"timeFrom\": null,\n                  \"timeShift\": null,\n                  \"title\": \"Sockets\",\n                  \"tooltip\": {\n                    \"shared\": true,\n                    \"sort\": 0,\n                    \"value_type\": \"individual\"\n                  },\n                  \"transparent\": false,\n                  \"type\": \"graph\",\n                  \"xaxis\": {\n                    \"mode\": \"time\",\n                    \"name\": null,\n                    \"show\": true,\n                    \"values\": []\n                  },\n                  \"yaxes\": [\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    },\n                    {\n                      \"format\": \"short\",\n                      \"label\": null,\n                      \"logBase\": 1,\n                      \"max\": null,\n                      \"min\": null,\n                      \"show\": true\n                    }\n                  ]\n                }\n              ],\n              \"repeat\": null,\n              \"repeatIteration\": null,\n              \"repeatRowId\": null,\n              \"showTitle\": false,\n              \"title\": \"Dashboard Row\",\n              \"titleSize\": \"h6\"\n            }\n          ],\n          \"schemaVersion\": 14,\n          \"style\": \"dark\",\n          \"tags\": [],\n          \"templating\": {\n            \"list\": [\n              {\n                
\"current\": {\n                  \"tags\": [],\n                  \"text\": \"Prometheus\",\n                  \"value\": \"Prometheus\"\n                },\n                \"hide\": 0,\n                \"label\": null,\n                \"name\": \"datasource\",\n                \"options\": [],\n                \"query\": \"prometheus\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"type\": \"datasource\"\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": \"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n            \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ]\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"RabbitMQ Metrics\",\n          \"version\": 17,\n          \"description\": \"Basic rabbitmq host stats: Node Stats, Exchanges, Channels, Consumers,  Connections, Queues, Messages, Messages per Queue, Memory, File Descriptors, Sockets.\"\n        }\n      openstack_control_plane: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"prometheus\",\n              \"description\": \"\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n          
    \"name\": \"Grafana\",\n              \"version\": \"4.5.2\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.0.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"singlestat\",\n              \"name\": \"Singlestat\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"text\",\n              \"name\": \"Text\",\n              \"version\": \"\"\n            }\n          ],\n          \"annotations\": {\n            \"list\": [\n              {\n                \"builtIn\": 1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n                \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              }\n            ]\n          },\n          \"editable\": false,\n          \"overwrite\": true,\n          \"gnetId\": null,\n          \"graphTooltip\": 1,\n          \"id\": 11,\n          \"links\": [],\n          \"panels\": [\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 0\n              },\n              \"id\": 28,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"OpenStack Services\",\n              \"type\": \"row\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n              
  \"rgba(200, 54, 35, 0.88)\",\n                \"rgba(118, 245, 40, 0.73)\",\n                \"rgba(225, 177, 40, 0.59)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 2,\n                \"x\": 0,\n                \"y\": 1\n              },\n              \"id\": 24,\n              \"interval\": \"> 60s\",\n              \"links\": [\n                {\n                  \"dashboard\": \"Openstack Service\",\n                  \"name\": \"Drilldown dashboard\",\n                  \"params\": \"var-Service=keystone\",\n                  \"title\": \"Openstack Service\",\n                  \"type\": \"dashboard\"\n                }\n              ],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                
\"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"column\": \"value\",\n                  \"condition\": \"\",\n                  \"expr\": \"openstack_check_keystone_api{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"}\",\n                  \"fill\": \"\",\n                  \"format\": \"time_series\",\n                  \"function\": \"last\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"null\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"groupby_field\": \"\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": \"1,2\",\n              \"title\": \"Keystone\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"no data\",\n                  \"value\": \"null\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"CRIT\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n        
          \"text\": \"OK\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"UNKW\",\n                  \"value\": \"2\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(200, 54, 35, 0.88)\",\n                \"rgba(118, 245, 40, 0.73)\",\n                \"rgba(225, 177, 40, 0.59)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 2,\n                \"x\": 2,\n                \"y\": 1\n              },\n              \"id\": 23,\n              \"interval\": \"> 60s\",\n              \"links\": [\n                {\n                  \"dashboard\": \"Openstack Service\",\n                  \"name\": \"Drilldown dashboard\",\n                  \"params\": \"var-Service=glance\",\n                  \"title\": \"Openstack Service\",\n                  \"type\": \"dashboard\"\n                }\n              ],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": 
\"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"column\": \"value\",\n                  \"condition\": \"\",\n                  \"expr\": \"openstack_check_glance_api{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"}\",\n                  \"fill\": \"\",\n                  \"format\": \"time_series\",\n                  \"function\": \"last\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"null\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"groupby_field\": \"\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": \"1,2\",\n              \"title\": 
\"Glance\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"no data\",\n                  \"value\": \"null\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"CRIT\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"OK\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"UNKW\",\n                  \"value\": \"2\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(202, 58, 40, 0.86)\",\n                \"rgba(118, 245, 40, 0.73)\",\n                \"rgba(225, 177, 40, 0.59)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 2,\n                \"x\": 4,\n                \"y\": 1\n              },\n              \"id\": 22,\n              \"interval\": \"> 60s\",\n              \"links\": [\n                {\n                  \"dashboard\": \"Openstack Service\",\n                  \"name\": \"Drilldown dashboard\",\n                  \"params\": \"var-Service=heat\",\n                  \"title\": \"Openstack 
Service\",\n                  \"type\": \"dashboard\"\n                }\n              ],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"column\": \"value\",\n                  \"condition\": \"\",\n                  \"expr\": \"openstack_check_heat_api{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"}\",\n                  \"fill\": \"\",\n                  \"format\": \"time_series\",\n                  \"function\": \"last\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"null\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  
],\n                  \"groupByTags\": [],\n                  \"groupby_field\": \"\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": \"1,2\",\n              \"title\": \"Heat\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"no data\",\n                  \"value\": \"null\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"CRIT\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"OK\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"UNKW\",\n                  \"value\": \"2\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(200, 54, 35, 0.88)\",\n                \"rgba(118, 245, 40, 0.73)\",\n                \"rgba(225, 177, 40, 0.59)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n      
        \"gridPos\": {\n                \"h\": 7,\n                \"w\": 2,\n                \"x\": 6,\n                \"y\": 1\n              },\n              \"id\": 21,\n              \"interval\": \"> 60s\",\n              \"links\": [\n                {\n                  \"dashboard\": \"Openstack Service\",\n                  \"name\": \"Drilldown dashboard\",\n                  \"params\": \"var-Service=neutron\",\n                  \"title\": \"Openstack Service\",\n                  \"type\": \"dashboard\"\n                }\n              ],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"column\": \"value\",\n                  \"condition\": \"\",\n                  \"expr\": \"openstack_check_neutron_api{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"}\",\n                  \"fill\": \"\",\n                  \"format\": 
\"time_series\",\n                  \"function\": \"last\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"null\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"groupby_field\": \"\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": \"1,2\",\n              \"title\": \"Neutron\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"no data\",\n                  \"value\": \"null\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"CRIT\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"OK\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"UNKW\",\n                  \"value\": \"2\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(208, 53, 34, 0.82)\",\n                
\"rgba(118, 245, 40, 0.73)\",\n                \"rgba(225, 177, 40, 0.59)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 2,\n                \"x\": 8,\n                \"y\": 1\n              },\n              \"id\": 20,\n              \"interval\": \"> 60s\",\n              \"links\": [\n                {\n                  \"dashboard\": \"Openstack Service\",\n                  \"name\": \"Drilldown dashboard\",\n                  \"params\": \"var-Service=nova\",\n                  \"title\": \"Openstack Service\",\n                  \"type\": \"dashboard\"\n                }\n              ],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n           
     \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"column\": \"value\",\n                  \"condition\": \"\",\n                  \"expr\": \"openstack_check_nova_api{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"}\",\n                  \"fill\": \"\",\n                  \"format\": \"time_series\",\n                  \"function\": \"last\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"null\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"groupby_field\": \"\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": \"1,2\",\n              \"title\": \"Nova\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"no data\",\n                  \"value\": \"null\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"CRIT\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"OK\",\n                  \"value\": 
\"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"UNKW\",\n                  \"value\": \"2\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(200, 54, 35, 0.88)\",\n                \"rgba(118, 245, 40, 0.73)\",\n                \"rgba(225, 177, 40, 0.59)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 2,\n                \"x\": 10,\n                \"y\": 1\n              },\n              \"id\": 19,\n              \"interval\": \"> 60s\",\n              \"links\": [\n                {\n                  \"dashboard\": \"Openstack Service\",\n                  \"name\": \"Drilldown dashboard\",\n                  \"params\": \"var-Service=swift\",\n                  \"title\": \"Openstack Service\",\n                  \"type\": \"dashboard\"\n                }\n              ],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              
\"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"column\": \"value\",\n                  \"condition\": \"\",\n                  \"expr\": \"openstack_check_swift_api{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"}\",\n                  \"fill\": \"\",\n                  \"format\": \"time_series\",\n                  \"function\": \"last\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"null\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"groupby_field\": \"\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": \"1,2\",\n              \"title\": \"Swift\",\n              \"type\": \"singlestat\",\n              
\"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"no data\",\n                  \"value\": \"null\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"CRIT\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"OK\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"UNKW\",\n                  \"value\": \"2\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(200, 54, 35, 0.88)\",\n                \"rgba(118, 245, 40, 0.73)\",\n                \"rgba(225, 177, 40, 0.59)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 2,\n                \"x\": 12,\n                \"y\": 1\n              },\n              \"id\": 18,\n              \"interval\": \"> 60s\",\n              \"links\": [\n                {\n                  \"dashboard\": \"Openstack Service\",\n                  \"name\": \"Drilldown dashboard\",\n                  \"params\": \"var-Service=cinder\",\n                  \"title\": \"Openstack Service\",\n                  \"type\": \"dashboard\"\n                
}\n              ],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"column\": \"value\",\n                  \"condition\": \"\",\n                  \"expr\": \"openstack_check_cinder_api{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"}\",\n                  \"fill\": \"\",\n                  \"format\": \"time_series\",\n                  \"function\": \"last\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"null\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  
\"groupby_field\": \"\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": \"1,2\",\n              \"title\": \"Cinder\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"no data\",\n                  \"value\": \"null\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"CRIT\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"OK\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"UNKW\",\n                  \"value\": \"2\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(200, 54, 35, 0.88)\",\n                \"rgba(118, 245, 40, 0.73)\",\n                \"rgba(225, 177, 40, 0.59)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n         
       \"w\": 2,\n                \"x\": 14,\n                \"y\": 1\n              },\n              \"id\": 17,\n              \"interval\": \"> 60s\",\n              \"links\": [\n                {\n                  \"dashboard\": \"Openstack Service\",\n                  \"name\": \"Drilldown dashboard\",\n                  \"params\": \"var-Service=placement\",\n                  \"title\": \"Openstack Service\",\n                  \"type\": \"dashboard\"\n                }\n              ],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"column\": \"value\",\n                  \"condition\": \"\",\n                  \"expr\": \"openstack_check_placement_api{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"}\",\n                  \"fill\": \"\",\n                  \"format\": \"time_series\",\n                  \"function\": \"last\",\n         
         \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"null\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"groupby_field\": \"\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": \"1,2\",\n              \"title\": \"Placement\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"no data\",\n                  \"value\": \"null\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"CRIT\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"OK\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"UNKW\",\n                  \"value\": \"2\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(208, 53, 34, 0.82)\",\n                \"rgba(118, 245, 40, 0.73)\",\n                \"rgba(225, 177, 40, 
0.59)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 2,\n                \"x\": 16,\n                \"y\": 1\n              },\n              \"id\": 16,\n              \"interval\": \"> 60s\",\n              \"links\": [\n                {\n                  \"dashboard\": \"RabbitMQ Metrics\",\n                  \"name\": \"Drilldown dashboard\",\n                  \"title\": \"RabbitMQ Metrics\",\n                  \"type\": \"dashboard\"\n                }\n              ],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n           
   },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"column\": \"value\",\n                  \"condition\": \"\",\n                  \"expr\": \"min(rabbitmq_up)\",\n                  \"fill\": \"\",\n                  \"format\": \"time_series\",\n                  \"function\": \"last\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"null\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"groupby_field\": \"\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": \"1,2\",\n              \"title\": \"RabbitMQ\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"no data\",\n                  \"value\": \"null\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"CRIT\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"OK\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"UNKW\",\n                  \"value\": \"2\"\n                }\n              
],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(208, 53, 34, 0.82)\",\n                \"rgba(118, 245, 40, 0.73)\",\n                \"rgba(225, 177, 40, 0.59)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 2,\n                \"x\": 18,\n                \"y\": 1\n              },\n              \"id\": 15,\n              \"interval\": \"> 60s\",\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n   
             \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"column\": \"value\",\n                  \"condition\": \"\",\n                  \"expr\": \"min(mysql_global_status_wsrep_ready)\",\n                  \"fill\": \"\",\n                  \"format\": \"time_series\",\n                  \"function\": \"last\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"null\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"groupby_field\": \"\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": \"1,2\",\n              \"title\": \"MariaDB\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"no data\",\n                  \"value\": \"null\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"CRIT\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"OK\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": 
\"=\",\n                  \"text\": \"UNKW\",\n                  \"value\": \"2\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(225, 177, 40, 0.59)\",\n                \"rgba(208, 53, 34, 0.82)\",\n                \"rgba(118, 245, 40, 0.73)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 2,\n                \"x\": 20,\n                \"y\": 1\n              },\n              \"id\": 14,\n              \"interval\": \"> 60s\",\n              \"links\": [\n                {\n                  \"dashboard\": \"Nginx Stats\",\n                  \"name\": \"Drilldown dashboard\",\n                  \"title\": \"Nginx Stats\",\n                  \"type\": \"dashboard\"\n                }\n              ],\n              \"mappingType\": 2,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n           
   \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"1\",\n                  \"text\": \"OK\",\n                  \"to\": \"99999999999999\"\n                },\n                {\n                  \"from\": \"0\",\n                  \"text\": \"CRIT\",\n                  \"to\": \"0\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"column\": \"value\",\n                  \"condition\": \"\",\n                  \"expr\": \"sum_over_time(nginx_connections_total{type=\\\"active\\\", namespace=\\\"openstack\\\"}[5m])\",\n                  \"fill\": \"\",\n                  \"format\": \"time_series\",\n                  \"function\": \"last\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"null\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"groupby_field\": \"\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": \"0,1\",\n              \"title\": \"Nginx\",\n              \"type\": 
\"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(208, 53, 34, 0.82)\",\n                \"rgba(118, 245, 40, 0.73)\",\n                \"rgba(225, 177, 40, 0.59)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 2,\n                \"x\": 22,\n                \"y\": 1\n              },\n              \"id\": 13,\n              \"interval\": \"> 60s\",\n              \"links\": [\n                {\n                  \"dashboard\": \"Memcached\",\n                  \"name\": \"Drilldown dashboard\",\n                  \"title\": \"Memcached\",\n                  \"type\": \"dashboard\"\n                }\n              ],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              
\"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"column\": \"value\",\n                  \"condition\": \"\",\n                  \"expr\": \"min(memcached_up)\",\n                  \"fill\": \"\",\n                  \"format\": \"time_series\",\n                  \"function\": \"last\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"null\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"groupby_field\": \"\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": \"1,2\",\n              \"title\": \"Memcached\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"50%\",\n              \"valueMaps\": [\n     
           {\n                  \"op\": \"=\",\n                  \"text\": \"no data\",\n                  \"value\": \"null\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"CRIT\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"OK\",\n                  \"value\": \"1\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"UNKW\",\n                  \"value\": \"2\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 2,\n                \"x\": 22,\n                \"y\": 8\n              },\n              \"id\": 113,\n              \"interval\": \"> 60s\",\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 3,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              
\"targets\": [\n                {\n                  \"alias\": \"free\",\n                  \"column\": \"value\",\n                  \"expr\": \"openstack_total_used_disk_GB{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"} + openstack_total_free_disk_GB{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"}\",\n                  \"format\": \"time_series\",\n                  \"function\": \"mean\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"0\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                },\n                {\n                  \"alias\": \"used\",\n                  \"column\": \"value\",\n                  \"expr\": \"openstack_total_used_disk_GB{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"}\",\n                  \"format\": \"time_series\",\n                  \"function\": \"mean\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"0\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"intervalFactor\": 2,\n                  
\"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"B\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Disk (used vs total)\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"gbytes\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 15\n              },\n              \"id\": 29,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Virtual resources\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              
\"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 0,\n                \"y\": 16\n              },\n              \"id\": 11,\n              \"interval\": \"> 60s\",\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 3,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"alias\": \"free\",\n                  \"column\": \"value\",\n                  \"expr\": \"openstack_total_used_vcpus{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"} + openstack_total_free_vcpus{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"}\",\n                  \"format\": \"time_series\",\n                  \"function\": \"min\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"0\"\n                      ],\n                      
\"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                },\n                {\n                  \"alias\": \"used\",\n                  \"column\": \"value\",\n                  \"expr\": \"openstack_total_used_vcpus{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"}\",\n                  \"format\": \"time_series\",\n                  \"function\": \"max\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"0\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"B\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"VCPUs (total vs used)\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": 
null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 8,\n                \"y\": 16\n              },\n              \"id\": 12,\n              \"interval\": \"> 60s\",\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 3,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              
\"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"alias\": \"free\",\n                  \"column\": \"value\",\n                  \"expr\": \"openstack_total_used_ram_MB{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"} + openstack_total_free_ram_MB{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"}\",\n                  \"format\": \"time_series\",\n                  \"function\": \"mean\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"0\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                },\n                {\n                  \"alias\": \"used\",\n                  \"column\": \"value\",\n                  \"expr\": \"openstack_total_used_ram_MB{job=\\\"openstack-metrics\\\", region=\\\"$region\\\"}\",\n                  \"format\": \"time_series\",\n                  \"function\": \"mean\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"0\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"interval\": 
\"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"B\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"RAM (total vs used)\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"mbytes\",\n                  \"label\": \"\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"dashes\\\"\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n          
      \"h\": 7,\n                \"w\": 8,\n                \"x\": 0,\n                \"y\": 23\n              },\n              \"id\": 27,\n              \"interval\": \"> 60s\",\n              \"legend\": {\n                \"alignAsTable\": false,\n                \"avg\": true,\n                \"current\": true,\n                \"hideEmpty\": true,\n                \"hideZero\": false,\n                \"max\": true,\n                \"min\": true,\n                \"show\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 4,\n              \"links\": [],\n              \"nullPointMode\": null,\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"alias\": \"free\",\n                  \"column\": \"value\",\n                  \"expr\": \"sum(openstack_running_instances)\",\n                  \"format\": \"time_series\",\n                  \"function\": \"mean\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"0\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"interval\": \"15s\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"{{ running_vms }}\",\n                  \"policy\": \"default\",\n           
       \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\"\n                },\n                {\n                  \"alias\": \"used\",\n                  \"column\": \"value\",\n                  \"expr\": \"sum(openstack_total_running_instances)\",\n                  \"format\": \"time_series\",\n                  \"function\": \"mean\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"0\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"interval\": \"15s\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"{{ total_vms }}\",\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"B\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"OpenStack Instances\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"transparent\": true,\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": 
[]\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"none\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            }\n          ],\n          \"refresh\": \"5m\",\n          \"schemaVersion\": 18,\n          \"style\": \"dark\",\n          \"tags\": [],\n          \"templating\": {\n            \"list\": [\n              {\n                \"current\": {\n                  \"text\": \"prometheus\",\n                  \"value\": \"prometheus\"\n                },\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Prometheus datasource\",\n                \"multi\": false,\n                \"name\": \"DS_PROMETHEUS\",\n                \"options\": [],\n                \"query\": \"prometheus\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"type\": \"datasource\"\n              },\n              {\n                \"allValue\": null,\n                \"current\": {},\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"definition\": \"\",\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": null,\n                \"multi\": false,\n                \"name\": \"region\",\n                \"options\": [],\n                \"query\": \"label_values(openstack_exporter_cache_refresh_duration_seconds, 
region)\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"sort\": 0,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": \"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n            \"collapse\": false,\n            \"enable\": true,\n            \"notice\": false,\n            \"now\": true,\n            \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"status\": \"Stable\",\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ],\n            \"type\": \"timepicker\"\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"OpenStack Metrics\",\n          \"version\": 1\n        }\n      openstack-service: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"prometheus\",\n              \"description\": \"\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"4.5.2\"\n            },\n            {\n              
\"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.0.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"singlestat\",\n              \"name\": \"Singlestat\",\n              \"version\": \"\"\n            }\n          ],\n          \"annotations\": {\n            \"enable\": true,\n            \"list\": [\n              {\n                \"builtIn\": 1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n                \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              }\n            ]\n          },\n          \"editable\": false,\n          \"overwrite\": true,\n          \"gnetId\": null,\n          \"graphTooltip\": 1,\n          \"id\": 29,\n          \"links\": [],\n          \"panels\": [\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 0\n              },\n              \"id\": 14,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Service Status\",\n              \"type\": \"row\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": true,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(225, 177, 40, 0.59)\",\n                \"rgba(200, 54, 35, 0.88)\",\n                \"rgba(118, 245, 40, 0.73)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              
\"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 4,\n                \"x\": 0,\n                \"y\": 1\n              },\n              \"id\": 6,\n              \"interval\": \"> 60s\",\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"column\": \"value\",\n                  \"condition\": \"\",\n                  \"expr\": \"openstack_check_[[Service]]_api{job=\\\"openstack-metrics\\\",region=\\\"$region\\\"}\",\n                  \"fill\": \"\",\n                  \"format\": \"time_series\",\n           
       \"function\": \"last\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        \"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"null\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"groupby_field\": \"\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": \"0,1\",\n              \"title\": \"\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"CRITICAL\",\n                  \"value\": \"0\"\n                },\n                {\n                  \"op\": \"=\",\n                  \"text\": \"OK\",\n                  \"value\": \"1\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"rgba(200, 54, 35, 0.88)\",\n                \"rgba(118, 245, 40, 0.73)\",\n                \"rgba(225, 177, 40, 0.59)\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                
\"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 4,\n                \"x\": 4,\n                \"y\": 1\n              },\n              \"id\": 13,\n              \"interval\": \"> 60s\",\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"column\": \"value\",\n                  \"condition\": \"\",\n                  \"expr\": \"sum(nginx_responses_total{server_zone=~\\\"[[Service]].*\\\", status_code=\\\"5xx\\\",region=\\\"$region\\\"})\",\n                  \"fill\": \"\",\n                  \"format\": \"time_series\",\n                  \"function\": \"count\",\n                  \"groupBy\": [\n                    {\n                     
 \"interval\": \"auto\",\n                      \"params\": [\n                        \"auto\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"0\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupby_field\": \"\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"step\": 120,\n                  \"tags\": []\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"HTTP 5xx errors\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"0\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 0,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 16,\n                \"x\": 8,\n                \"y\": 1\n              },\n              \"id\": 7,\n              \"interval\": \">60s\",\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": false,\n                \"max\": true,\n                \"min\": true,\n                \"show\": 
true,\n                \"sortDesc\": true,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(nginx_upstream_response_msecs_avg{upstream=~\\\"openstack-[[Service]].*\\\",region=\\\"$region\\\"}) by (upstream)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"refId\": \"A\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"HTTP response time\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"s\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                 
 \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 0,\n                \"y\": 8\n              },\n              \"id\": 9,\n              \"interval\": \"> 60s\",\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": true,\n              \"targets\": [\n                {\n                  \"alias\": \"healthy\",\n                  \"column\": \"value\",\n                  \"expr\": \"openstack_check_[[Service]]_api{region=\\\"$region\\\"}\",\n                  \"format\": \"time_series\",\n                  \"function\": \"last\",\n                  \"groupBy\": [\n                    {\n                      \"params\": [\n                        
\"$interval\"\n                      ],\n                      \"type\": \"time\"\n                    },\n                    {\n                      \"params\": [\n                        \"0\"\n                      ],\n                      \"type\": \"fill\"\n                    }\n                  ],\n                  \"groupByTags\": [],\n                  \"intervalFactor\": 2,\n                  \"policy\": \"default\",\n                  \"rawQuery\": false,\n                  \"refId\": \"A\",\n                  \"resultFormat\": \"time_series\",\n                  \"select\": [],\n                  \"step\": 120,\n                  \"tags\": []\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"API Availability\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": false,\n                \"sort\": 0,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"none\",\n                  \"label\": \"\",\n                  \"logBase\": 1,\n                  \"max\": 1,\n                  \"min\": 0,\n                  \"show\": false\n                },\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": false\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n 
           {\n              \"aliasColors\": {\n                \"{status_code=\\\"2xx\\\"}\": \"#629E51\",\n                \"{status_code=\\\"5xx\\\"}\": \"#BF1B00\"\n              },\n              \"bars\": true,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 0,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 16,\n                \"x\": 8,\n                \"y\": 8\n              },\n              \"id\": 8,\n              \"interval\": \"> 60s\",\n              \"legend\": {\n                \"alignAsTable\": false,\n                \"avg\": false,\n                \"current\": false,\n                \"hideEmpty\": false,\n                \"max\": false,\n                \"min\": false,\n                \"rightSide\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": false,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(nginx_responses_total{server_zone=~\\\"[[Service]].*\\\",region=\\\"$region\\\"}) by (status_code)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"refId\": \"A\",\n                  \"step\": 120\n                }\n              ],\n              \"thresholds\": [],\n              
\"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Number of HTTP responses\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": 0,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            }\n          ],\n          \"refresh\": \"5m\",\n          \"schemaVersion\": 18,\n          \"style\": \"dark\",\n          \"tags\": [],\n          \"templating\": {\n            \"list\": [\n              {\n                \"current\": {\n                  \"text\": \"prometheus\",\n                  \"value\": \"prometheus\"\n                },\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Prometheus datasource\",\n                \"multi\": false,\n                \"name\": \"DS_PROMETHEUS\",\n                \"options\": [],\n                \"query\": \"prometheus\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"type\": 
\"datasource\"\n              },\n              {\n                \"allValue\": null,\n                \"current\": {},\n                \"datasource\": \"prometheus\",\n                \"definition\": \"\",\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"region\",\n                \"multi\": false,\n                \"name\": \"region\",\n                \"options\": [],\n                \"query\": \"label_values(openstack_exporter_cache_refresh_duration_seconds, region)\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"sort\": 0,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              },\n              {\n                \"allValue\": null,\n                \"current\": {\n                  \"tags\": [],\n                  \"text\": \"cinder\",\n                  \"value\": \"cinder\"\n                },\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": null,\n                \"multi\": false,\n                \"name\": \"Service\",\n                \"options\": [\n                  {\n                    \"selected\": false,\n                    \"text\": \"nova\",\n                    \"value\": \"nova\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"glance\",\n                    \"value\": \"glance\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"keystone\",\n                    \"value\": \"keystone\"\n                  },\n                  {\n                    \"selected\": true,\n                    \"text\": \"cinder\",\n                    \"value\": \"cinder\"\n                  },\n          
        {\n                    \"selected\": false,\n                    \"text\": \"heat\",\n                    \"value\": \"heat\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"placement\",\n                    \"value\": \"placement\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"neutron\",\n                    \"value\": \"neutron\"\n                  }\n                ],\n                \"query\": \"nova,glance,keystone,cinder,heat,placement,neutron\",\n                \"skipUrlSync\": false,\n                \"type\": \"custom\"\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": \"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n            \"collapse\": false,\n            \"enable\": true,\n            \"notice\": false,\n            \"now\": true,\n            \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"status\": \"Stable\",\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ],\n            \"type\": \"timepicker\"\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"Openstack Service\",\n          \"version\": 1\n        }\n...\n"
  },
  {
    "path": "values_overrides/grafana/persistentvolume.yaml",
    "content": "# This overrides file provides a raw json file for a dashboard for\n# the etcd\n---\nconf:\n  dashboards:\n    openstack:\n      persistent_volume: |-\n        {\n          \"__inputs\": [\n              {\n              \"name\": \"prometheus\",\n              \"label\": \"Prometheus\",\n              \"description\": \"\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n              }\n          ],\n          \"__requires\": [\n              {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"5.0.0\"\n              },\n              {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n              },\n              {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.0.0\"\n              }\n          ],\n          \"annotations\": {\n              \"list\": [\n              ]\n          },\n          \"editable\": false,\n          \"overwrite\": true,\n          \"gnetId\": null,\n          \"graphTooltip\": 0,\n          \"hideControls\": false,\n          \"id\": null,\n          \"links\": [\n          ],\n          \"refresh\": \"\",\n          \"rows\": [\n              {\n                  \"collapse\": false,\n                  \"collapsed\": false,\n                  \"panels\": [\n                      {\n                          \"aliasColors\": {\n                          },\n                          \"bars\": false,\n                          \"dashLength\": 10,\n                          \"dashes\": false,\n                          \"datasource\": \"$datasource\",\n                          \"fill\": 1,\n                          \"gridPos\": {\n                      
    },\n                          \"id\": 2,\n                          \"legend\": {\n                              \"alignAsTable\": true,\n                              \"avg\": true,\n                              \"current\": true,\n                              \"max\": true,\n                              \"min\": true,\n                              \"rightSide\": false,\n                              \"show\": true,\n                              \"total\": false,\n                              \"values\": true\n                          },\n                          \"lines\": true,\n                          \"linewidth\": 1,\n                          \"links\": [\n                          ],\n                          \"nullPointMode\": \"null\",\n                          \"percentage\": false,\n                          \"pointradius\": 5,\n                          \"points\": false,\n                          \"renderer\": \"flot\",\n                          \"repeat\": null,\n                          \"seriesOverrides\": [\n                          ],\n                          \"spaceLength\": 10,\n                          \"span\": 9,\n                          \"stack\": true,\n                          \"steppedLine\": false,\n                          \"targets\": [\n                              {\n                                  \"expr\": \"(\\n  sum without(instance, node) (kubelet_volume_stats_capacity_bytes{cluster=\\\"$cluster\\\", job=\\\"kubelet\\\", namespace=\\\"$namespace\\\", persistentvolumeclaim=\\\"$volume\\\"})\\n  -\\n  sum without(instance, node) (kubelet_volume_stats_available_bytes{cluster=\\\"$cluster\\\", job=\\\"kubelet\\\", namespace=\\\"$namespace\\\", persistentvolumeclaim=\\\"$volume\\\"})\\n)\\n\",\n                                  \"format\": \"time_series\",\n                                  \"intervalFactor\": 1,\n                                  \"legendFormat\": \"Used Space\",\n                      
            \"refId\": \"A\"\n                              },\n                              {\n                                  \"expr\": \"sum without(instance, node) (kubelet_volume_stats_available_bytes{cluster=\\\"$cluster\\\", job=\\\"kubelet\\\", namespace=\\\"$namespace\\\", persistentvolumeclaim=\\\"$volume\\\"})\\n\",\n                                  \"format\": \"time_series\",\n                                  \"intervalFactor\": 1,\n                                  \"legendFormat\": \"Free Space\",\n                                  \"refId\": \"B\"\n                              }\n                          ],\n                          \"thresholds\": [\n                          ],\n                          \"timeFrom\": null,\n                          \"timeShift\": null,\n                          \"title\": \"Volume Space Usage\",\n                          \"tooltip\": {\n                              \"shared\": false,\n                              \"sort\": 0,\n                              \"value_type\": \"individual\"\n                          },\n                          \"type\": \"graph\",\n                          \"xaxis\": {\n                              \"buckets\": null,\n                              \"mode\": \"time\",\n                              \"name\": null,\n                              \"show\": true,\n                              \"values\": [\n                              ]\n                          },\n                          \"yaxes\": [\n                              {\n                                  \"format\": \"bytes\",\n                                  \"label\": null,\n                                  \"logBase\": 1,\n                                  \"max\": null,\n                                  \"min\": 0,\n                                  \"show\": true\n                              },\n                              {\n                                  \"format\": \"bytes\",\n    
                              \"label\": null,\n                                  \"logBase\": 1,\n                                  \"max\": null,\n                                  \"min\": 0,\n                                  \"show\": true\n                              }\n                          ]\n                      },\n                      {\n                          \"cacheTimeout\": null,\n                          \"colorBackground\": false,\n                          \"colorValue\": false,\n                          \"colors\": [\n                              \"rgba(50, 172, 45, 0.97)\",\n                              \"rgba(237, 129, 40, 0.89)\",\n                              \"rgba(245, 54, 54, 0.9)\"\n                          ],\n                          \"datasource\": \"$datasource\",\n                          \"format\": \"percent\",\n                          \"gauge\": {\n                              \"maxValue\": 100,\n                              \"minValue\": 0,\n                              \"show\": true,\n                              \"thresholdLabels\": false,\n                              \"thresholdMarkers\": true\n                          },\n                          \"gridPos\": {\n                          },\n                          \"id\": 3,\n                          \"interval\": null,\n                          \"links\": [\n                          ],\n                          \"mappingType\": 1,\n                          \"mappingTypes\": [\n                              {\n                                  \"name\": \"value to text\",\n                                  \"value\": 1\n                              },\n                              {\n                                  \"name\": \"range to text\",\n                                  \"value\": 2\n                              }\n                          ],\n                          \"maxDataPoints\": 100,\n                          
\"nullPointMode\": \"connected\",\n                          \"nullText\": null,\n                          \"postfix\": \"\",\n                          \"postfixFontSize\": \"50%\",\n                          \"prefix\": \"\",\n                          \"prefixFontSize\": \"50%\",\n                          \"rangeMaps\": [\n                              {\n                                  \"from\": \"null\",\n                                  \"text\": \"N/A\",\n                                  \"to\": \"null\"\n                              }\n                          ],\n                          \"span\": 3,\n                          \"sparkline\": {\n                              \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                              \"full\": false,\n                              \"lineColor\": \"rgb(31, 120, 193)\",\n                              \"show\": false\n                          },\n                          \"tableColumn\": \"\",\n                          \"targets\": [\n                              {\n                                  \"expr\": \"(\\n  kubelet_volume_stats_capacity_bytes{cluster=\\\"$cluster\\\", job=\\\"kubelet\\\", namespace=\\\"$namespace\\\", persistentvolumeclaim=\\\"$volume\\\"}\\n  -\\n  kubelet_volume_stats_available_bytes{cluster=\\\"$cluster\\\", job=\\\"kubelet\\\", namespace=\\\"$namespace\\\", persistentvolumeclaim=\\\"$volume\\\"}\\n)\\n/\\nkubelet_volume_stats_capacity_bytes{cluster=\\\"$cluster\\\", job=\\\"kubelet\\\", namespace=\\\"$namespace\\\", persistentvolumeclaim=\\\"$volume\\\"}\\n* 100\\n\",\n                                  \"format\": \"time_series\",\n                                  \"intervalFactor\": 2,\n                                  \"legendFormat\": \"\",\n                                  \"refId\": \"A\"\n                              }\n                          ],\n                          \"thresholds\": \"80, 90\",\n                          \"title\": 
\"Volume Space Usage\",\n                          \"tooltip\": {\n                              \"shared\": false\n                          },\n                          \"type\": \"singlestat\",\n                          \"valueFontSize\": \"80%\",\n                          \"valueMaps\": [\n                              {\n                                  \"op\": \"=\",\n                                  \"text\": \"N/A\",\n                                  \"value\": \"null\"\n                              }\n                          ],\n                          \"valueName\": \"current\"\n                      }\n                  ],\n                  \"repeat\": null,\n                  \"repeatIteration\": null,\n                  \"repeatRowId\": null,\n                  \"showTitle\": false,\n                  \"title\": \"Dashboard Row\",\n                  \"titleSize\": \"h6\",\n                  \"type\": \"row\"\n              },\n              {\n                  \"collapse\": false,\n                  \"collapsed\": false,\n                  \"panels\": [\n                      {\n                          \"aliasColors\": {\n                          },\n                          \"bars\": false,\n                          \"dashLength\": 10,\n                          \"dashes\": false,\n                          \"datasource\": \"$datasource\",\n                          \"fill\": 1,\n                          \"gridPos\": {\n                          },\n                          \"id\": 4,\n                          \"legend\": {\n                              \"alignAsTable\": true,\n                              \"avg\": true,\n                              \"current\": true,\n                              \"max\": true,\n                              \"min\": true,\n                              \"rightSide\": false,\n                              \"show\": true,\n                              \"total\": false,\n                      
        \"values\": true\n                          },\n                          \"lines\": true,\n                          \"linewidth\": 1,\n                          \"links\": [\n                          ],\n                          \"nullPointMode\": \"null\",\n                          \"percentage\": false,\n                          \"pointradius\": 5,\n                          \"points\": false,\n                          \"renderer\": \"flot\",\n                          \"repeat\": null,\n                          \"seriesOverrides\": [\n                          ],\n                          \"spaceLength\": 10,\n                          \"span\": 9,\n                          \"stack\": true,\n                          \"steppedLine\": false,\n                          \"targets\": [\n                              {\n                                  \"expr\": \"sum without(instance, node) (kubelet_volume_stats_inodes_used{cluster=\\\"$cluster\\\", job=\\\"kubelet\\\", namespace=\\\"$namespace\\\", persistentvolumeclaim=\\\"$volume\\\"})\\n\",\n                                  \"format\": \"time_series\",\n                                  \"intervalFactor\": 1,\n                                  \"legendFormat\": \"Used inodes\",\n                                  \"refId\": \"A\"\n                              },\n                              {\n                                  \"expr\": \"(\\n  sum without(instance, node) (kubelet_volume_stats_inodes{cluster=\\\"$cluster\\\", job=\\\"kubelet\\\", namespace=\\\"$namespace\\\", persistentvolumeclaim=\\\"$volume\\\"})\\n  -\\n  sum without(instance, node) (kubelet_volume_stats_inodes_used{cluster=\\\"$cluster\\\", job=\\\"kubelet\\\", namespace=\\\"$namespace\\\", persistentvolumeclaim=\\\"$volume\\\"})\\n)\\n\",\n                                  \"format\": \"time_series\",\n                                  \"intervalFactor\": 1,\n                                  \"legendFormat\": \" Free 
inodes\",\n                                  \"refId\": \"B\"\n                              }\n                          ],\n                          \"thresholds\": [\n                          ],\n                          \"timeFrom\": null,\n                          \"timeShift\": null,\n                          \"title\": \"Volume inodes Usage\",\n                          \"tooltip\": {\n                              \"shared\": false,\n                              \"sort\": 0,\n                              \"value_type\": \"individual\"\n                          },\n                          \"type\": \"graph\",\n                          \"xaxis\": {\n                              \"buckets\": null,\n                              \"mode\": \"time\",\n                              \"name\": null,\n                              \"show\": true,\n                              \"values\": [\n                              ]\n                          },\n                          \"yaxes\": [\n                              {\n                                  \"format\": \"none\",\n                                  \"label\": null,\n                                  \"logBase\": 1,\n                                  \"max\": null,\n                                  \"min\": 0,\n                                  \"show\": true\n                              },\n                              {\n                                  \"format\": \"none\",\n                                  \"label\": null,\n                                  \"logBase\": 1,\n                                  \"max\": null,\n                                  \"min\": 0,\n                                  \"show\": true\n                              }\n                          ]\n                      },\n                      {\n                          \"cacheTimeout\": null,\n                          \"colorBackground\": false,\n                          \"colorValue\": 
false,\n                          \"colors\": [\n                              \"rgba(50, 172, 45, 0.97)\",\n                              \"rgba(237, 129, 40, 0.89)\",\n                              \"rgba(245, 54, 54, 0.9)\"\n                          ],\n                          \"datasource\": \"$datasource\",\n                          \"format\": \"percent\",\n                          \"gauge\": {\n                              \"maxValue\": 100,\n                              \"minValue\": 0,\n                              \"show\": true,\n                              \"thresholdLabels\": false,\n                              \"thresholdMarkers\": true\n                          },\n                          \"gridPos\": {\n                          },\n                          \"id\": 5,\n                          \"interval\": null,\n                          \"links\": [\n                          ],\n                          \"mappingType\": 1,\n                          \"mappingTypes\": [\n                              {\n                                  \"name\": \"value to text\",\n                                  \"value\": 1\n                              },\n                              {\n                                  \"name\": \"range to text\",\n                                  \"value\": 2\n                              }\n                          ],\n                          \"maxDataPoints\": 100,\n                          \"nullPointMode\": \"connected\",\n                          \"nullText\": null,\n                          \"postfix\": \"\",\n                          \"postfixFontSize\": \"50%\",\n                          \"prefix\": \"\",\n                          \"prefixFontSize\": \"50%\",\n                          \"rangeMaps\": [\n                              {\n                                  \"from\": \"null\",\n                                  \"text\": \"N/A\",\n                                  
\"to\": \"null\"\n                              }\n                          ],\n                          \"span\": 3,\n                          \"sparkline\": {\n                              \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                              \"full\": false,\n                              \"lineColor\": \"rgb(31, 120, 193)\",\n                              \"show\": false\n                          },\n                          \"tableColumn\": \"\",\n                          \"targets\": [\n                              {\n                                  \"expr\": \"kubelet_volume_stats_inodes_used{cluster=\\\"$cluster\\\", job=\\\"kubelet\\\", namespace=\\\"$namespace\\\", persistentvolumeclaim=\\\"$volume\\\"}\\n/\\nkubelet_volume_stats_inodes{cluster=\\\"$cluster\\\", job=\\\"kubelet\\\", namespace=\\\"$namespace\\\", persistentvolumeclaim=\\\"$volume\\\"}\\n* 100\\n\",\n                                  \"format\": \"time_series\",\n                                  \"intervalFactor\": 2,\n                                  \"legendFormat\": \"\",\n                                  \"refId\": \"A\"\n                              }\n                          ],\n                          \"thresholds\": \"80, 90\",\n                          \"title\": \"Volume inodes Usage\",\n                          \"tooltip\": {\n                              \"shared\": false\n                          },\n                          \"type\": \"singlestat\",\n                          \"valueFontSize\": \"80%\",\n                          \"valueMaps\": [\n                              {\n                                  \"op\": \"=\",\n                                  \"text\": \"N/A\",\n                                  \"value\": \"null\"\n                              }\n                          ],\n                          \"valueName\": \"current\"\n                      }\n                  ],\n                  \"repeat\": 
null,\n                  \"repeatIteration\": null,\n                  \"repeatRowId\": null,\n                  \"showTitle\": false,\n                  \"title\": \"Dashboard Row\",\n                  \"titleSize\": \"h6\",\n                  \"type\": \"row\"\n              }\n          ],\n          \"schemaVersion\": 14,\n          \"style\": \"dark\",\n          \"tags\": [\n              \"kubernetes-mixin\"\n          ],\n          \"templating\": {\n              \"list\": [\n                  {\n                      \"current\": {\n                          \"text\": \"Prometheus\",\n                          \"value\": \"Prometheus\"\n                      },\n                      \"hide\": 0,\n                      \"label\": null,\n                      \"name\": \"datasource\",\n                      \"options\": [\n                      ],\n                      \"query\": \"prometheus\",\n                      \"refresh\": 1,\n                      \"regex\": \"\",\n                      \"type\": \"datasource\"\n                  },\n                  {\n                      \"allValue\": null,\n                      \"current\": {\n                      },\n                      \"datasource\": \"$datasource\",\n                      \"hide\": 2,\n                      \"includeAll\": false,\n                      \"label\": \"cluster\",\n                      \"multi\": false,\n                      \"name\": \"cluster\",\n                      \"options\": [\n                      ],\n                      \"query\": \"label_values(kubelet_volume_stats_capacity_bytes, cluster)\",\n                      \"refresh\": 2,\n                      \"regex\": \"\",\n                      \"sort\": 1,\n                      \"tagValuesQuery\": \"\",\n                      \"tags\": [\n                      ],\n                      \"tagsQuery\": \"\",\n                      \"type\": \"query\",\n                      \"useTags\": false\n              
    },\n                  {\n                      \"allValue\": null,\n                      \"current\": {\n                      },\n                      \"datasource\": \"$datasource\",\n                      \"hide\": 0,\n                      \"includeAll\": false,\n                      \"label\": \"Namespace\",\n                      \"multi\": false,\n                      \"name\": \"namespace\",\n                      \"options\": [\n                      ],\n                      \"query\": \"label_values(kubelet_volume_stats_capacity_bytes{cluster=\\\"$cluster\\\", job=\\\"kubelet\\\"}, namespace)\",\n                      \"refresh\": 2,\n                      \"regex\": \"\",\n                      \"sort\": 1,\n                      \"tagValuesQuery\": \"\",\n                      \"tags\": [\n                      ],\n                      \"tagsQuery\": \"\",\n                      \"type\": \"query\",\n                      \"useTags\": false\n                  },\n                  {\n                      \"allValue\": null,\n                      \"current\": {\n                      },\n                      \"datasource\": \"$datasource\",\n                      \"hide\": 0,\n                      \"includeAll\": false,\n                      \"label\": \"PersistentVolumeClaim\",\n                      \"multi\": false,\n                      \"name\": \"volume\",\n                      \"options\": [\n                      ],\n                      \"query\": \"label_values(kubelet_volume_stats_capacity_bytes{cluster=\\\"$cluster\\\", job=\\\"kubelet\\\", namespace=\\\"$namespace\\\"}, persistentvolumeclaim)\",\n                      \"refresh\": 2,\n                      \"regex\": \"\",\n                      \"sort\": 1,\n                      \"tagValuesQuery\": \"\",\n                      \"tags\": [\n                      ],\n                      \"tagsQuery\": \"\",\n                      \"type\": \"query\",\n                     
 \"useTags\": false\n                  }\n              ]\n          },\n          \"time\": {\n              \"from\": \"now-1h\",\n              \"to\": \"now\"\n          },\n          \"timepicker\": {\n              \"refresh_intervals\": [\n                  \"5s\",\n                  \"10s\",\n                  \"30s\",\n                  \"1m\",\n                  \"5m\",\n                  \"15m\",\n                  \"30m\",\n                  \"1h\",\n                  \"2h\",\n                  \"1d\"\n              ],\n              \"time_options\": [\n                  \"5m\",\n                  \"15m\",\n                  \"1h\",\n                  \"6h\",\n                  \"12h\",\n                  \"24h\",\n                  \"2d\",\n                  \"7d\",\n                  \"30d\"\n              ]\n          },\n          \"timezone\": \"\",\n          \"title\": \"Persistent Volumes\",\n          \"version\": 0\n        }\n...\n"
  },
  {
    "path": "values_overrides/grafana/prometheus.yaml",
    "content": "# NOTE(srwilkers): This overrides file provides a reference for a dashboard for\n# Prometheus\n---\nconf:\n  dashboards:\n    lma:\n      prometheus: |-\n        {\n          \"__inputs\": [\n            {\n              \"name\": \"DS_PROMETHEUS\",\n              \"label\": \"prometheus\",\n              \"description\": \"Prometheus which you want to monitor\",\n              \"type\": \"datasource\",\n              \"pluginId\": \"prometheus\",\n              \"pluginName\": \"Prometheus\"\n            }\n          ],\n          \"__requires\": [\n            {\n              \"type\": \"grafana\",\n              \"id\": \"grafana\",\n              \"name\": \"Grafana\",\n              \"version\": \"4.6.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"graph\",\n              \"name\": \"Graph\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"datasource\",\n              \"id\": \"prometheus\",\n              \"name\": \"Prometheus\",\n              \"version\": \"1.0.0\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"singlestat\",\n              \"name\": \"Singlestat\",\n              \"version\": \"\"\n            },\n            {\n              \"type\": \"panel\",\n              \"id\": \"text\",\n              \"name\": \"Text\",\n              \"version\": \"\"\n            }\n          ],\n          \"annotations\": {\n            \"list\": [\n              {\n                \"builtIn\": 1,\n                \"datasource\": \"-- Grafana --\",\n                \"enable\": true,\n                \"hide\": true,\n                \"iconColor\": \"rgba(0, 211, 255, 1)\",\n                \"name\": \"Annotations & Alerts\",\n                \"type\": \"dashboard\"\n              },\n              {\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"enable\": true,\n                
\"expr\": \"count(sum(up{instance=\\\"$instance\\\"}) by (instance) < 1)\",\n                \"hide\": false,\n                \"iconColor\": \"rgb(250, 44, 18)\",\n                \"limit\": 100,\n                \"name\": \"downage\",\n                \"showIn\": 0,\n                \"step\": \"30s\",\n                \"tagKeys\": \"instance\",\n                \"textFormat\": \"prometheus down\",\n                \"titleFormat\": \"Downage\",\n                \"type\": \"alert\"\n              },\n              {\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"enable\": true,\n                \"expr\": \"sum(changes(prometheus_config_last_reload_success_timestamp_seconds[10m])) by (instance)\",\n                \"hide\": false,\n                \"iconColor\": \"#fceaca\",\n                \"limit\": 100,\n                \"name\": \"Reload\",\n                \"showIn\": 0,\n                \"step\": \"5m\",\n                \"tagKeys\": \"instance\",\n                \"tags\": [],\n                \"titleFormat\": \"Reload\",\n                \"type\": \"tags\"\n              }\n            ]\n          },\n          \"description\": \"Dashboard for monitoring of Prometheus v2.x.x\",\n          \"overwrite\": true,\n          \"editable\": false,\n          \"gnetId\": 3681,\n          \"graphTooltip\": 1,\n          \"id\": 41,\n          \"links\": [\n            {\n              \"icon\": \"info\",\n              \"tags\": [],\n              \"targetBlank\": true,\n              \"title\": \"Dashboard's Github \",\n              \"tooltip\": \"Github repo of this dashboard\",\n              \"type\": \"link\",\n              \"url\": \"https://github.com/FUSAKLA/Prometheus2-grafana-dashboard\"\n            },\n            {\n              \"icon\": \"doc\",\n              \"tags\": [],\n              \"targetBlank\": true,\n              \"title\": \"Prometheus Docs\",\n              \"tooltip\": \"\",\n              \"type\": 
\"link\",\n              \"url\": \"http://prometheus.io/docs/introduction/overview/\"\n            }\n          ],\n          \"panels\": [\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 0\n              },\n              \"id\": 53,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Header instance info\",\n              \"type\": \"row\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"#299c46\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"#bf1b00\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 1,\n              \"format\": \"s\",\n              \"gauge\": {\n                \"maxValue\": 1000000,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 5,\n                \"w\": 4,\n                \"x\": 0,\n                \"y\": 1\n              },\n              \"id\": 41,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n  
            \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"time() - process_start_time_seconds{instance=\\\"$instance\\\"}\",\n                  \"format\": \"time_series\",\n                  \"instant\": false,\n                  \"intervalFactor\": 2,\n                  \"refId\": \"A\"\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Uptime\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": true,\n              \"colors\": [\n                \"#299c46\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"#bf1b00\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"format\": \"short\",\n              \"gauge\": {\n                \"maxValue\": 1000000,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 5,\n         
       \"w\": 8,\n                \"x\": 4,\n                \"y\": 1\n              },\n              \"id\": 42,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": true\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"prometheus_tsdb_head_series{instance=\\\"$instance\\\"}\",\n                  \"format\": \"time_series\",\n                  \"instant\": false,\n                  \"intervalFactor\": 2,\n                  \"refId\": \"A\"\n                }\n              ],\n              \"thresholds\": \"500000,800000,1000000\",\n              \"title\": \"Total count of time series\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"150%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n       
         }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"#299c46\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"#d44a3a\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 5,\n                \"w\": 4,\n                \"x\": 12,\n                \"y\": 1\n              },\n              \"id\": 48,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": 
false\n              },\n              \"tableColumn\": \"version\",\n              \"targets\": [\n                {\n                  \"expr\": \"prometheus_build_info{instance=\\\"$instance\\\"}\",\n                  \"format\": \"table\",\n                  \"instant\": true,\n                  \"intervalFactor\": 2,\n                  \"refId\": \"A\"\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Version\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"avg\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": false,\n              \"colors\": [\n                \"#299c46\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"#d44a3a\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"format\": \"ms\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 5,\n                \"w\": 4,\n                \"x\": 16,\n                \"y\": 1\n              },\n              \"id\": 49,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n      
          }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n                \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"prometheus_tsdb_head_max_time{instance=\\\"$instance\\\"} - prometheus_tsdb_head_min_time{instance=\\\"$instance\\\"}\",\n                  \"format\": \"time_series\",\n                  \"instant\": true,\n                  \"intervalFactor\": 2,\n                  \"refId\": \"A\"\n                }\n              ],\n              \"thresholds\": \"\",\n              \"title\": \"Actual head block length\",\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"80%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"current\"\n            },\n            {\n              \"content\": \"<img src=\\\"https://cdn.worldvectorlogo.com/logos/prometheus.svg\\\" height=\\\"140px\\\"/>\",\n              \"gridPos\": {\n                \"h\": 5,\n                \"w\": 2,\n                \"x\": 20,\n                \"y\": 1\n              },\n              \"height\": \"\",\n              
\"id\": 50,\n              \"links\": [],\n              \"mode\": \"html\",\n              \"options\": {},\n              \"title\": \"\",\n              \"transparent\": true,\n              \"type\": \"text\"\n            },\n            {\n              \"cacheTimeout\": null,\n              \"colorBackground\": false,\n              \"colorValue\": true,\n              \"colors\": [\n                \"#e6522c\",\n                \"rgba(237, 129, 40, 0.89)\",\n                \"#299c46\"\n              ],\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 1,\n              \"format\": \"none\",\n              \"gauge\": {\n                \"maxValue\": 100,\n                \"minValue\": 0,\n                \"show\": false,\n                \"thresholdLabels\": false,\n                \"thresholdMarkers\": true\n              },\n              \"gridPos\": {\n                \"h\": 5,\n                \"w\": 2,\n                \"x\": 22,\n                \"y\": 1\n              },\n              \"id\": 52,\n              \"interval\": null,\n              \"links\": [],\n              \"mappingType\": 1,\n              \"mappingTypes\": [\n                {\n                  \"name\": \"value to text\",\n                  \"value\": 1\n                },\n                {\n                  \"name\": \"range to text\",\n                  \"value\": 2\n                }\n              ],\n              \"maxDataPoints\": 100,\n              \"nullPointMode\": \"connected\",\n              \"nullText\": null,\n              \"options\": {},\n              \"postfix\": \"\",\n              \"postfixFontSize\": \"50%\",\n              \"prefix\": \"\",\n              \"prefixFontSize\": \"50%\",\n              \"rangeMaps\": [\n                {\n                  \"from\": \"null\",\n                  \"text\": \"N/A\",\n                  \"to\": \"null\"\n                }\n              ],\n              \"sparkline\": {\n     
           \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n                \"full\": false,\n                \"lineColor\": \"rgb(31, 120, 193)\",\n                \"show\": false\n              },\n              \"tableColumn\": \"\",\n              \"targets\": [\n                {\n                  \"expr\": \"2\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"refId\": \"A\"\n                }\n              ],\n              \"thresholds\": \"10,20\",\n              \"title\": \"\",\n              \"transparent\": true,\n              \"type\": \"singlestat\",\n              \"valueFontSize\": \"200%\",\n              \"valueMaps\": [\n                {\n                  \"op\": \"=\",\n                  \"text\": \"N/A\",\n                  \"value\": \"null\"\n                }\n              ],\n              \"valueName\": \"avg\"\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 6\n              },\n              \"id\": 54,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Main info\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 0,\n                \"y\": 7\n              },\n              \"id\": 15,\n              \"legend\": {\n                \"avg\": true,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": 
false,\n                \"total\": false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": true,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"max(prometheus_engine_query_duration_seconds{instance=\\\"$instance\\\"}) by (instance, slice)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"max duration for {{slice}}\",\n                  \"metric\": \"prometheus_local_storage_rushed_mode\",\n                  \"refId\": \"A\",\n                  \"step\": 900\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Query elapsed time\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"s\",\n                  \"label\": \"\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                
},\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 8,\n                \"y\": 7\n              },\n              \"id\": 17,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": 
\"sum(increase(prometheus_tsdb_head_series_created_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"created on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_maintain_series_duration_seconds_count\",\n                  \"refId\": \"A\",\n                  \"step\": 1800\n                },\n                {\n                  \"expr\": \"sum(increase(prometheus_tsdb_head_series_removed_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) * -1\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"removed on {{ instance }}\",\n                  \"refId\": \"B\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Head series created/deleted\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n       
           \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 16,\n                \"y\": 7\n              },\n              \"id\": 13,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(increase(prometheus_target_scrapes_exceeded_sample_limit_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) > 0\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  
\"intervalFactor\": 2,\n                  \"legendFormat\": \"exceeded_sample_limit on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"A\",\n                  \"step\": 1800\n                },\n                {\n                  \"expr\": \"sum(increase(prometheus_target_scrapes_sample_duplicate_timestamp_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) > 0\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"duplicate_timestamp on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"B\",\n                  \"step\": 1800\n                },\n                {\n                  \"expr\": \"sum(increase(prometheus_target_scrapes_sample_out_of_bounds_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) > 0\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"out_of_bounds on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"C\",\n                  \"step\": 1800\n                },\n                {\n                  \"expr\": \"sum(increase(prometheus_target_scrapes_sample_out_of_order_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) > 0\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"out_of_order on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"D\",\n                  \"step\": 1800\n                },\n                {\n              
    \"expr\": \"sum(increase(prometheus_rule_evaluation_failures_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) > 0\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"rule_evaluation_failure on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"G\",\n                  \"step\": 1800\n                },\n                {\n                  \"expr\": \"sum(increase(prometheus_tsdb_compactions_failed_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) > 0\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"tsdb_compactions_failed on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"K\",\n                  \"step\": 1800\n                },\n                {\n                  \"expr\": \"sum(increase(prometheus_tsdb_reloads_failures_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) > 0\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"tsdb_reloads_failures on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"L\",\n                  \"step\": 1800\n                },\n                {\n                  \"expr\": \"sum(increase(prometheus_tsdb_head_series_not_found{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) > 0\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": 
\"head_series_not_found on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"E\",\n                  \"step\": 1800\n                },\n                {\n                  \"expr\": \"sum(increase(prometheus_evaluator_iterations_missed_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) > 0\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"evaluator_iterations_missed on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"O\",\n                  \"step\": 1800\n                },\n                {\n                  \"expr\": \"sum(increase(prometheus_evaluator_iterations_skipped_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) > 0\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"evaluator_iterations_skipped on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"P\",\n                  \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Prometheus errors\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n    
          },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 14\n              },\n              \"id\": 55,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Scrape & rule duration\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"description\": \"\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"grid\": {},\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 0,\n                \"y\": 15\n              },\n              \"id\": 25,\n              \"legend\": {\n                \"alignAsTable\": true,\n                \"avg\": true,\n                \"current\": true,\n                \"max\": true,\n                \"min\": false,\n                \"show\": false,\n                \"sort\": \"max\",\n                \"sortDesc\": true,\n                \"total\": 
false,\n                \"values\": true\n              },\n              \"lines\": true,\n              \"linewidth\": 2,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"prometheus_target_interval_length_seconds{instance=\\\"$instance\\\",quantile=\\\"0.99\\\"} - 60\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"2m\",\n                  \"intervalFactor\": 1,\n                  \"legendFormat\": \"{{instance}}\",\n                  \"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 300\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Scrape delay (counts with 1m scrape interval)\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"cumulative\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"s\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  
\"format\": \"short\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 12,\n                \"x\": 12,\n                \"y\": 15\n              },\n              \"id\": 14,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"Queue length\",\n                  \"yaxis\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": 
\"sum(prometheus_evaluator_duration_seconds{instance=\\\"$instance\\\"}) by (instance, quantile)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Queue length\",\n                  \"metric\": \"prometheus_local_storage_indexing_queue_length\",\n                  \"refId\": \"B\",\n                  \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Rule evaluation duration\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"s\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 22\n              },\n              
\"id\": 56,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Requests & queries\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 0,\n                \"y\": 23\n              },\n              \"id\": 18,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(increase(http_requests_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance, handler) > 0\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ handler }} on {{ instance }}\",\n                  
\"metric\": \"\",\n                  \"refId\": \"A\",\n                  \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Request count\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"none\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n       
         \"h\": 7,\n                \"w\": 6,\n                \"x\": 6,\n                \"y\": 23\n              },\n              \"id\": 16,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"hideEmpty\": true,\n                \"hideZero\": true,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"max(sum(http_request_duration_microseconds{instance=\\\"$instance\\\"}) by (instance, handler, quantile)) by (instance, handler) > 0\",\n                  \"format\": \"time_series\",\n                  \"hide\": false,\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ handler }} on {{ instance }}\",\n                  \"refId\": \"B\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Request duration per handler\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": 
\"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"µs\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 12,\n                \"y\": 23\n              },\n              \"id\": 19,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n 
             \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(increase(http_request_size_bytes{instance=\\\"$instance\\\", quantile=\\\"0.99\\\"}[$aggregation_interval])) by (instance, handler) > 0\",\n                  \"format\": \"time_series\",\n                  \"hide\": false,\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ handler }} in {{ instance }}\",\n                  \"refId\": \"B\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Request size by handler\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n              
  \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {\n                \"Allocated bytes\": \"#F9BA8F\",\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max count collector\": \"#bf1b00\",\n                \"Max count harvester\": \"#bf1b00\",\n                \"Max to persist\": \"#3F6833\",\n                \"RSS\": \"#890F02\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 18,\n                \"y\": 23\n              },\n              \"id\": 8,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"/Max.*/\",\n                  \"fill\": 0,\n                  \"linewidth\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": 
\"sum(prometheus_engine_queries{instance=\\\"$instance\\\"}) by (instance, handler)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Current count \",\n                  \"metric\": \"last\",\n                  \"refId\": \"A\",\n                  \"step\": 1800\n                },\n                {\n                  \"expr\": \"sum(prometheus_engine_queries_concurrent_max{instance=\\\"$instance\\\"}) by (instance, handler)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Max count\",\n                  \"metric\": \"last\",\n                  \"refId\": \"B\",\n                  \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Cont of concurent queries\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n 
             ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 30\n              },\n              \"id\": 57,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Alerting\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {\n                \"Alert queue capacity on o collector\": \"#bf1b00\",\n                \"Alert queue capacity on o harvester\": \"#bf1b00\",\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 0,\n                \"y\": 31\n              },\n              \"id\": 20,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              
\"seriesOverrides\": [\n                {\n                  \"alias\": \"/.*capacity.*/\",\n                  \"fill\": 0,\n                  \"linewidth\": 2\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(prometheus_notifications_queue_capacity{instance=\\\"$instance\\\"})by (instance)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Alert queue capacity \",\n                  \"metric\": \"prometheus_local_storage_checkpoint_last_size_bytes\",\n                  \"refId\": \"A\",\n                  \"step\": 1800\n                },\n                {\n                  \"expr\": \"sum(prometheus_notifications_queue_length{instance=\\\"$instance\\\"})by (instance)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Alert queue size on \",\n                  \"metric\": \"prometheus_local_storage_checkpoint_last_size_bytes\",\n                  \"refId\": \"B\",\n                  \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Alert queue size\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n             
     \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"aliasColors\": {\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 8,\n                \"y\": 31\n              },\n              \"id\": 21,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n  
            \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(prometheus_notifications_alertmanagers_discovered{instance=\\\"$instance\\\"}) by (instance)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Checkpoint chunks written/s\",\n                  \"metric\": \"prometheus_local_storage_checkpoint_series_chunks_written_sum\",\n                  \"refId\": \"A\",\n                  \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Count of discovered alertmanagers\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"none\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              
\"aliasColors\": {\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 16,\n                \"y\": 31\n              },\n              \"id\": 39,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(increase(prometheus_notifications_dropped_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) > 0\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"notifications_dropped on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"F\",\n                  \"step\": 1800\n                },\n 
               {\n                  \"expr\": \"sum(increase(prometheus_rule_evaluation_failures_total{rule_type=\\\"alerting\\\",instance=\\\"$instance\\\"}[$aggregation_interval])) by (rule_type,instance) > 0\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"rule_evaluation_failures on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"A\",\n                  \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeRegions\": [],\n              \"timeShift\": null,\n              \"title\": \"Alerting errors\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ],\n              \"yaxis\": {\n                \"align\": false,\n                \"alignLevel\": null\n              }\n            },\n            {\n              \"collapsed\": false,\n 
             \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 38\n              },\n              \"id\": 58,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Service discovery\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 0,\n                \"y\": 39\n              },\n              \"id\": 45,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": true,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"increase(prometheus_target_sync_length_seconds_count{scrape_job=\\\"kubernetes-service-endpoints\\\"}[$aggregation_interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Count of target synces\",\n                  \"refId\": \"A\",\n                  \"step\": 240\n                }\n              ],\n              
\"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Kubernetes SD sync count\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 6,\n                \"y\": 39\n              },\n              \"id\": 46,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n               
 \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(increase(prometheus_target_scrapes_exceeded_sample_limit_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) > 0\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"exceeded_sample_limit on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"A\",\n                  \"step\": 1800\n                },\n                {\n                  \"expr\": \"sum(increase(prometheus_sd_file_read_errors_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) > 0\",\n                  \"format\": \"time_series\",\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"sd_file_read_error on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"E\",\n                  \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Service discovery errors\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                
\"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 46\n              },\n              \"id\": 59,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"TSDB stats\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 0,\n                \"y\": 47\n              },\n              \"id\": 36,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                
\"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(increase(prometheus_tsdb_reloads_total{instance=\\\"$instance\\\"}[30m])) by (instance)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ instance }}\",\n                  \"refId\": \"A\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Reloaded block from disk\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n           
     }\n              ]\n            },\n            {\n              \"aliasColors\": {\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 6,\n                \"y\": 47\n              },\n              \"id\": 5,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(prometheus_tsdb_blocks_loaded{instance=\\\"$instance\\\"}) by (instance)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Loaded data blocks\",\n                  \"metric\": \"prometheus_local_storage_memory_chunkdescs\",\n                  \"refId\": \"A\",\n                  \"step\": 1800\n                }\n              ],\n              
\"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Loaded data blocks\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 12,\n                \"y\": 47\n              },\n              \"id\": 3,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n           
     \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"prometheus_tsdb_head_series{instance=\\\"$instance\\\"}\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Time series count\",\n                  \"metric\": \"prometheus_local_storage_memory_series\",\n                  \"refId\": \"A\",\n                  \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Time series total count\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                
{\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 18,\n                \"y\": 47\n              },\n              \"id\": 1,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(prometheus_tsdb_head_samples_appended_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"samples/s {{instance}}\",\n                  \"metric\": \"prometheus_local_storage_ingested_samples_total\",\n                  \"refId\": \"A\",\n               
   \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Samples Appended per second\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": \"\",\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 54\n              },\n              \"id\": 60,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Head block stats\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\",\n                \"To persist\": \"#9AC48A\"\n              },\n              \"bars\": false,\n              
\"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 0,\n                \"y\": 55\n              },\n              \"id\": 2,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"/Max.*/\",\n                  \"fill\": 0\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(prometheus_tsdb_head_chunks{instance=\\\"$instance\\\"}) by (instance)\",\n                  \"format\": \"time_series\",\n                  \"hide\": false,\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Head chunk count\",\n                  \"metric\": \"prometheus_local_storage_memory_chunks\",\n                  \"refId\": \"A\",\n                  \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Head chunks count\",\n              \"tooltip\": {\n                
\"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 8,\n                \"y\": 55\n              },\n              \"id\": 35,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n 
             \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"max(prometheus_tsdb_head_max_time{instance=\\\"$instance\\\"}) by (instance) - min(prometheus_tsdb_head_min_time{instance=\\\"$instance\\\"}) by (instance)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ instance }}\",\n                  \"refId\": \"A\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Length of head block\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"ms\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\"\n              
},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 16,\n                \"y\": 55\n              },\n              \"id\": 4,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(prometheus_tsdb_head_chunks_created_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"created on {{ instance }}\",\n                  \"refId\": \"B\"\n                },\n                {\n                  \"expr\": \"sum(rate(prometheus_tsdb_head_chunks_removed_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) * -1\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"deleted on {{ instance }}\",\n                  \"refId\": \"C\"\n                }\n       
       ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Head Chunks Created/Deleted per second\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 62\n              },\n              \"id\": 61,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Data maintenance\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 0,\n                \"y\": 63\n              },\n   
           \"id\": 33,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(increase(prometheus_tsdb_compaction_duration_sum{instance=\\\"$instance\\\"}[30m]) / increase(prometheus_tsdb_compaction_duration_count{instance=\\\"$instance\\\"}[30m])) by (instance)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ instance }}\",\n                  \"refId\": \"B\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Compaction duration\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"s\",\n                  \"label\": null,\n                  \"logBase\": 
1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 6,\n                \"y\": 63\n              },\n              \"id\": 34,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(prometheus_tsdb_head_gc_duration_seconds{instance=\\\"$instance\\\"}) by (instance, quantile)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ quantile }} on {{ instance }}\",\n                  \"refId\": \"A\"\n                }\n              
],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Go Garbage collection duration\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 0,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"s\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 12,\n                \"y\": 63\n              },\n              \"id\": 37,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n  
            \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(prometheus_tsdb_wal_truncate_duration_seconds{instance=\\\"$instance\\\"}) by (instance, quantile)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ quantile }} on {{ instance }}\",\n                  \"refId\": \"A\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"WAL truncate duration seconds\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              
\"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 6,\n                \"x\": 18,\n                \"y\": 63\n              },\n              \"id\": 38,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"connected\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(prometheus_tsdb_wal_fsync_duration_seconds{instance=\\\"$instance\\\"}) by (instance, quantile)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"{{ quantile }} {{ instance }}\",\n                  \"refId\": \"A\"\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"WAL fsync duration seconds\",\n              \"tooltip\": {\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                
\"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"s\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"collapsed\": false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 70\n              },\n              \"id\": 62,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"RAM&CPU\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {\n                \"Allocated bytes\": \"#7EB26D\",\n                \"Allocated bytes - 1m max\": \"#BF1B00\",\n                \"Allocated bytes - 1m min\": \"#BF1B00\",\n                \"Allocated bytes - 5m max\": \"#BF1B00\",\n                \"Allocated bytes - 5m min\": \"#BF1B00\",\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\",\n                \"RSS\": \"#447EBC\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": null,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 
0,\n                \"y\": 71\n              },\n              \"id\": 6,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [\n                {\n                  \"alias\": \"/-/\",\n                  \"fill\": 0\n                },\n                {\n                  \"alias\": \"collector heap size\",\n                  \"color\": \"#E0752D\",\n                  \"fill\": 0,\n                  \"linewidth\": 2\n                },\n                {\n                  \"alias\": \"collector kubernetes memory limit\",\n                  \"color\": \"#BF1B00\",\n                  \"fill\": 0,\n                  \"linewidth\": 3\n                }\n              ],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(process_resident_memory_bytes{instance=\\\"$instance\\\"}) by (instance)\",\n                  \"format\": \"time_series\",\n                  \"hide\": false,\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Total resident memory - {{instance}}\",\n                  \"metric\": \"process_resident_memory_bytes\",\n                  \"refId\": \"B\",\n                  \"step\": 1800\n                },\n                {\n                  \"expr\": 
\"sum(go_memstats_alloc_bytes{instance=\\\"$instance\\\"}) by (instance)\",\n                  \"format\": \"time_series\",\n                  \"hide\": false,\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Total allocated bytes - {{instance}}\",\n                  \"metric\": \"go_memstats_alloc_bytes\",\n                  \"refId\": \"A\",\n                  \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Memory\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {\n                \"Allocated bytes\": \"#F9BA8F\",\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\",\n                \"RSS\": \"#890F02\"\n              },\n              \"bars\": false,\n   
           \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 8,\n                \"y\": 71\n              },\n              \"id\": 7,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"rate(go_memstats_alloc_bytes_total{instance=\\\"$instance\\\"}[$aggregation_interval])\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"Allocated Bytes/s\",\n                  \"metric\": \"go_memstats_alloc_bytes\",\n                  \"refId\": \"A\",\n                  \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Allocations per second\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              
},\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"bytes\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"aliasColors\": {},\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"decimals\": 2,\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 8,\n                \"x\": 16,\n                \"y\": 71\n              },\n              \"id\": 9,\n              \"legend\": {\n                \"alignAsTable\": false,\n                \"avg\": false,\n                \"current\": false,\n                \"hideEmpty\": false,\n                \"max\": false,\n                \"min\": false,\n                \"rightSide\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n       
       \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(rate(process_cpu_seconds_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance)\",\n                  \"format\": \"time_series\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"CPU/s\",\n                  \"metric\": \"prometheus_local_storage_ingested_samples_total\",\n                  \"refId\": \"B\",\n                  \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"CPU per second\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": [\n                  \"avg\"\n                ]\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"none\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            },\n            {\n              \"collapsed\": 
false,\n              \"gridPos\": {\n                \"h\": 1,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 78\n              },\n              \"id\": 63,\n              \"panels\": [],\n              \"repeat\": null,\n              \"title\": \"Conntrack errors\",\n              \"type\": \"row\"\n            },\n            {\n              \"aliasColors\": {\n                \"Chunks\": \"#1F78C1\",\n                \"Chunks to persist\": \"#508642\",\n                \"Max chunks\": \"#052B51\",\n                \"Max to persist\": \"#3F6833\"\n              },\n              \"bars\": false,\n              \"dashLength\": 10,\n              \"dashes\": false,\n              \"datasource\": \"${DS_PROMETHEUS}\",\n              \"editable\": true,\n              \"error\": false,\n              \"fill\": 1,\n              \"gridPos\": {\n                \"h\": 7,\n                \"w\": 24,\n                \"x\": 0,\n                \"y\": 79\n              },\n              \"id\": 47,\n              \"legend\": {\n                \"avg\": false,\n                \"current\": false,\n                \"max\": false,\n                \"min\": false,\n                \"show\": false,\n                \"total\": false,\n                \"values\": false\n              },\n              \"lines\": true,\n              \"linewidth\": 1,\n              \"links\": [],\n              \"nullPointMode\": \"null\",\n              \"options\": {},\n              \"percentage\": false,\n              \"pointradius\": 5,\n              \"points\": false,\n              \"renderer\": \"flot\",\n              \"seriesOverrides\": [],\n              \"spaceLength\": 10,\n              \"stack\": false,\n              \"steppedLine\": false,\n              \"targets\": [\n                {\n                  \"expr\": \"sum(increase(net_conntrack_dialer_conn_failed_total{instance=\\\"$instance\\\"}[$aggregation_interval])) by (instance) > 
0\",\n                  \"format\": \"time_series\",\n                  \"hide\": false,\n                  \"interval\": \"\",\n                  \"intervalFactor\": 2,\n                  \"legendFormat\": \"conntrack_dialer_conn_failed on {{ instance }}\",\n                  \"metric\": \"prometheus_local_storage_chunk_ops_total\",\n                  \"refId\": \"M\",\n                  \"step\": 1800\n                }\n              ],\n              \"thresholds\": [],\n              \"timeFrom\": null,\n              \"timeShift\": null,\n              \"title\": \"Net errors\",\n              \"tooltip\": {\n                \"msResolution\": false,\n                \"shared\": true,\n                \"sort\": 2,\n                \"value_type\": \"individual\"\n              },\n              \"type\": \"graph\",\n              \"xaxis\": {\n                \"buckets\": null,\n                \"mode\": \"time\",\n                \"name\": null,\n                \"show\": true,\n                \"values\": []\n              },\n              \"yaxes\": [\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": \"0\",\n                  \"show\": true\n                },\n                {\n                  \"format\": \"short\",\n                  \"label\": null,\n                  \"logBase\": 1,\n                  \"max\": null,\n                  \"min\": null,\n                  \"show\": true\n                }\n              ]\n            }\n          ],\n          \"refresh\": \"5m\",\n          \"schemaVersion\": 18,\n          \"style\": \"dark\",\n          \"tags\": [\n            \"prometheus\"\n          ],\n          \"templating\": {\n            \"list\": [\n              {\n                \"auto\": true,\n                \"auto_count\": 30,\n                \"auto_min\": \"2m\",\n                
\"current\": {\n                  \"text\": \"auto\",\n                  \"value\": \"$__auto_interval_aggregation_interval\"\n                },\n                \"hide\": 0,\n                \"label\": \"aggregation interval\",\n                \"name\": \"aggregation_interval\",\n                \"options\": [\n                  {\n                    \"selected\": true,\n                    \"text\": \"auto\",\n                    \"value\": \"$__auto_interval_aggregation_interval\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"1m\",\n                    \"value\": \"1m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"10m\",\n                    \"value\": \"10m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"30m\",\n                    \"value\": \"30m\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"1h\",\n                    \"value\": \"1h\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"6h\",\n                    \"value\": \"6h\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"12h\",\n                    \"value\": \"12h\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"1d\",\n                    \"value\": \"1d\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"7d\",\n                    \"value\": \"7d\"\n                  },\n                  {\n                    \"selected\": false,\n                    \"text\": \"14d\",\n                    \"value\": \"14d\"\n                  },\n          
        {\n                    \"selected\": false,\n                    \"text\": \"30d\",\n                    \"value\": \"30d\"\n                  }\n                ],\n                \"query\": \"1m,10m,30m,1h,6h,12h,1d,7d,14d,30d\",\n                \"refresh\": 2,\n                \"skipUrlSync\": false,\n                \"type\": \"interval\"\n              },\n              {\n                \"allValue\": null,\n                \"current\": {},\n                \"datasource\": \"${DS_PROMETHEUS}\",\n                \"definition\": \"\",\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Instance\",\n                \"multi\": false,\n                \"name\": \"instance\",\n                \"options\": [],\n                \"query\": \"label_values(prometheus_build_info, instance)\",\n                \"refresh\": 2,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"sort\": 2,\n                \"tagValuesQuery\": \"\",\n                \"tags\": [],\n                \"tagsQuery\": \"\",\n                \"type\": \"query\",\n                \"useTags\": false\n              },\n              {\n                \"current\": {\n                  \"text\": \"prometheus\",\n                  \"value\": \"prometheus\"\n                },\n                \"hide\": 0,\n                \"includeAll\": false,\n                \"label\": \"Prometheus datasource\",\n                \"multi\": false,\n                \"name\": \"DS_PROMETHEUS\",\n                \"options\": [],\n                \"query\": \"prometheus\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"type\": \"datasource\"\n              },\n              {\n                \"current\": {\n                  \"text\": \"No data sources found\",\n                  \"value\": \"\"\n                },\n                \"hide\": 
0,\n                \"includeAll\": false,\n                \"label\": \"InfluxDB datasource\",\n                \"multi\": false,\n                \"name\": \"influx_datasource\",\n                \"options\": [],\n                \"query\": \"influxdb\",\n                \"refresh\": 1,\n                \"regex\": \"\",\n                \"skipUrlSync\": false,\n                \"type\": \"datasource\"\n              }\n            ]\n          },\n          \"time\": {\n            \"from\": \"now-1h\",\n            \"to\": \"now\"\n          },\n          \"timepicker\": {\n            \"refresh_intervals\": [\n              \"5s\",\n              \"10s\",\n              \"30s\",\n              \"1m\",\n              \"5m\",\n              \"15m\",\n              \"30m\",\n              \"1h\",\n              \"2h\",\n              \"1d\"\n            ],\n            \"time_options\": [\n              \"5m\",\n              \"15m\",\n              \"1h\",\n              \"6h\",\n              \"12h\",\n              \"24h\",\n              \"2d\",\n              \"7d\",\n              \"30d\"\n            ]\n          },\n          \"timezone\": \"browser\",\n          \"title\": \"Prometheus2.0 (v1.0.0 by FUSAKLA)\",\n          \"version\": 1\n        }\n...\n"
  },
  {
    "path": "values_overrides/grafana/sqlite3.yaml",
    "content": "---\ndependencies:\n  static:\n    grafana:\n      jobs: null\n      services: null\nmanifests:\n  job_db_init: false\n  job_db_init_session: false\n  job_db_session_sync: false\n  job_image_repo_sync: true\n  job_run_migrator: false\n  job_set_admin_user: false\n  secret_db: false\n  secret_db_session: false\nconf:\n  grafana:\n    database:\n      type: sqlite3\n      path: /var/lib/grafana/data/sqlite3.db\n    session:\n      provider: file\n      provider_config: sessions\n...\n"
  },
  {
    "path": "values_overrides/grafana/tls.yaml",
    "content": "---\nconf:\n  grafana:\n    database:\n      ssl_mode: true\n      ca_cert_path: /etc/mysql/certs/ca.crt\n      client_key_path: /etc/mysql/certs/tls.key\n      client_cert_path: /etc/mysql/certs/tls.crt\n  provisioning:\n    datasources:\n      template: |\n        {{ $prom_host := tuple \"monitoring\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }}\n        {{ $prom_uri := printf \"https://%s\" $prom_host }}\n        apiVersion: 1\n        datasources:\n        - name: prometheus\n          type: prometheus\n          access: proxy\n          orgId: 1\n          editable: true\n          basicAuth: true\n          basicAuthUser: {{ .Values.endpoints.monitoring.auth.user.username }}\n          jsonData:\n            tlsAuthWithCACert: true\n          secureJsonData:\n            basicAuthPassword: {{ .Values.endpoints.monitoring.auth.user.password }}\n            tlsCACert: $CACERT\n          url: {{ $prom_uri }}\nendpoints:\n  grafana:\n    host_fqdn_override:\n      default:\n        tls:\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/heat/2024.2-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    db_init: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_service: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    heat_db_sync: \"quay.io/airshipit/heat:2024.2-ubuntu_jammy\"\n    heat_api: \"quay.io/airshipit/heat:2024.2-ubuntu_jammy\"\n    heat_cfn: \"quay.io/airshipit/heat:2024.2-ubuntu_jammy\"\n    heat_engine: \"quay.io/airshipit/heat:2024.2-ubuntu_jammy\"\n    heat_engine_cleaner: \"quay.io/airshipit/heat:2024.2-ubuntu_jammy\"\n    heat_purge_deleted: \"quay.io/airshipit/heat:2024.2-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/heat/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    heat_db_sync: \"quay.io/airshipit/heat:2025.1-ubuntu_jammy\"\n    heat_api: \"quay.io/airshipit/heat:2025.1-ubuntu_jammy\"\n    heat_cfn: \"quay.io/airshipit/heat:2025.1-ubuntu_jammy\"\n    heat_engine: \"quay.io/airshipit/heat:2025.1-ubuntu_jammy\"\n    heat_engine_cleaner: \"quay.io/airshipit/heat:2025.1-ubuntu_jammy\"\n    heat_purge_deleted: \"quay.io/airshipit/heat:2025.1-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/heat/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    heat_db_sync: \"quay.io/airshipit/heat:2025.1-ubuntu_noble\"\n    heat_api: \"quay.io/airshipit/heat:2025.1-ubuntu_noble\"\n    heat_cfn: \"quay.io/airshipit/heat:2025.1-ubuntu_noble\"\n    heat_engine: \"quay.io/airshipit/heat:2025.1-ubuntu_noble\"\n    heat_engine_cleaner: \"quay.io/airshipit/heat:2025.1-ubuntu_noble\"\n    heat_purge_deleted: \"quay.io/airshipit/heat:2025.1-ubuntu_noble\"\n...\n"
  },
  {
    "path": "values_overrides/heat/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    heat_db_sync: \"quay.io/airshipit/heat:2025.2-ubuntu_noble\"\n    heat_api: \"quay.io/airshipit/heat:2025.2-ubuntu_noble\"\n    heat_cfn: \"quay.io/airshipit/heat:2025.2-ubuntu_noble\"\n    heat_engine: \"quay.io/airshipit/heat:2025.2-ubuntu_noble\"\n    heat_engine_cleaner: \"quay.io/airshipit/heat:2025.2-ubuntu_noble\"\n    heat_purge_deleted: \"quay.io/airshipit/heat:2025.2-ubuntu_noble\"\n...\n"
  },
  {
    "path": "values_overrides/heat/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    heat_api:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      heat:\n        custom.tld/key: \"value\"\n    tls:\n      orchestration_api_public:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/heat/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    heat:\n      container:\n        heat_api:\n          appArmorProfile:\n            type: RuntimeDefault\n        heat_cfn:\n          appArmorProfile:\n            type: RuntimeDefault\n        heat_engine:\n          appArmorProfile:\n            type: RuntimeDefault\n    engine_cleaner:\n      container:\n        heat_engine_cleaner:\n          appArmorProfile:\n            type: RuntimeDefault\n    ks_user:\n      container:\n        heat_ks_domain_user:\n          appArmorProfile:\n            type: RuntimeDefault\n    trusts:\n      container:\n        heat_trusts:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/heat/gateway.yaml",
    "content": "# Gateway API overrides for Heat.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  orchestration:\n    host_fqdn_override:\n      public:\n        host: heat.openstack-helm.org\n  cloudformation:\n    host_fqdn_override:\n      public:\n        host: cloudformation.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  ingress_cfn: false\n  service_ingress_api: false\n  service_ingress_cfn: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: heat-api-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.orchestration.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: heat-api\n              port: 8004\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: heat-cfn-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.cloudformation.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: heat-cfn\n              port: 8000\n...\n"
  },
  {
    "path": "values_overrides/heat/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/heat:2025.1-ubuntu_noble_loci\"\n    db_init: \"quay.io/airshipit/heat:2025.1-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/heat:2025.1-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/heat:2025.1-ubuntu_noble_loci\"\n    ks_service: \"quay.io/airshipit/heat:2025.1-ubuntu_noble_loci\"\n    ks_endpoints: \"quay.io/airshipit/heat:2025.1-ubuntu_noble_loci\"\n    heat_db_sync: \"quay.io/airshipit/heat:2025.1-ubuntu_noble_loci\"\n    heat_api: \"quay.io/airshipit/heat:2025.1-ubuntu_noble_loci\"\n    heat_cfn: \"quay.io/airshipit/heat:2025.1-ubuntu_noble_loci\"\n    heat_engine: \"quay.io/airshipit/heat:2025.1-ubuntu_noble_loci\"\n    heat_engine_cleaner: \"quay.io/airshipit/heat:2025.1-ubuntu_noble_loci\"\n    heat_purge_deleted: \"quay.io/airshipit/heat:2025.1-ubuntu_noble_loci\"\n...\n"
  },
  {
    "path": "values_overrides/heat/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/heat:2025.2-ubuntu_noble_loci\"\n    db_init: \"quay.io/airshipit/heat:2025.2-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/heat:2025.2-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/heat:2025.2-ubuntu_noble_loci\"\n    ks_service: \"quay.io/airshipit/heat:2025.2-ubuntu_noble_loci\"\n    ks_endpoints: \"quay.io/airshipit/heat:2025.2-ubuntu_noble_loci\"\n    heat_db_sync: \"quay.io/airshipit/heat:2025.2-ubuntu_noble_loci\"\n    heat_api: \"quay.io/airshipit/heat:2025.2-ubuntu_noble_loci\"\n    heat_cfn: \"quay.io/airshipit/heat:2025.2-ubuntu_noble_loci\"\n    heat_engine: \"quay.io/airshipit/heat:2025.2-ubuntu_noble_loci\"\n    heat_engine_cleaner: \"quay.io/airshipit/heat:2025.2-ubuntu_noble_loci\"\n    heat_purge_deleted: \"quay.io/airshipit/heat:2025.2-ubuntu_noble_loci\"\n...\n"
  },
  {
    "path": "values_overrides/heat/mariadb-operator.yaml",
    "content": "---\nconf:\n  heat:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  heat_api:\n    - heat-db-conn\n  heat_db_sync:\n    - heat-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: heat\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: heat\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: heat-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: heat-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"heat\"\n      table: \"*\"\n      username: heat\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: heat-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: heat\n      passwordSecretKeyRef:\n        name: heat-db-password\n        key: password\n      database: heat\n      secretName: heat-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      
serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/heat/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\nnetwork_policy:\n  heat:\n    ingress:\n      - from:\n        - podSelector:\n            matchLabels:\n              application: heat\n        - podSelector:\n            matchLabels:\n              application: ingress\n        - podSelector:\n            matchLabels:\n              application: horizon\n        ports:\n        - protocol: TCP\n          port: 8000\n        - protocol: TCP\n          port: 8003\n        - protocol: TCP\n          port: 8004\n    egress:\n      - to:\n        - podSelector:\n            matchLabels:\n              application: neutron\n      - to:\n        - podSelector:\n            matchLabels:\n              application: nova\n      - to:\n        - podSelector:\n            matchLabels:\n              application: glance\n      - to:\n        - podSelector:\n            matchLabels:\n              application: cinder\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/32\n        ports:\n          - protocol: TCP\n            port: %%%REPLACE_API_PORT%%%\n...\n"
  },
  {
    "path": "values_overrides/heat/rabbitmq4.yaml",
    "content": "---\n# Upgrading from rabbitmq 3.x to 4.x requires:\n# 1: upgrading to the latest rabbitmq 3.x release and enabling all feature flags\n# 2: removing all rabbitmq 3.x openstack vhost ha policies\n# 3: setting rabbit_ha_queues to false in all openstack component configs\n# 4: wiping the rabbitmq database if rabbit_ha_queues and/or vhost ha policies were used with 3.x\nconf:\n  heat:\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: false\n\n# Note: rabbit_ha_queues is true by default for all openstack components in openstack-helm\n\n# Steps to wipe rabbitmq database:\n# 1: rabbitmqctl stop_app\n# 2: rabbitmqctl force_reset\n# 3: rabbitmqctl start_app\n# 4: rerun all openstack component rabbit-init jobs to recreate rabbitmq vhosts and users\n\n# Note: rabbitmq classic v2 vs quorum queues\n# With rabbitmq 4.x classic queues have been replaced with classic v2 queues. Classic v2 queues\n# do not support high availability. For HA, quorum queues must be used. Quorum queues are HA by default.\n# Classic v2 queues are the default in Rabbitmq 4.x.\n#\n# To enable quorum queues with rabbitmq 4.x you can use:\n#\n# conf:\n#   heat:\n#     oslo_messaging_rabbit:\n#       rabbit_ha_queues: false\n#       rabbit_quorum_queues: true\n#       rabbit_transient_quorum_queue: true\n#       use_queue_manager: true\n...\n"
  },
  {
    "path": "values_overrides/heat/tls-offloading.yaml",
    "content": "---\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      heat:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      heat_trustee:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      test:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n\ntls:\n  identity: true\n...\n"
  },
  {
    "path": "values_overrides/heat/tls.yaml",
    "content": "---\nconf:\n  software:\n    apache2:\n      binary: apache2\n      start_parameters: -DFOREGROUND\n      site_dir: /etc/apache2/sites-enabled\n      conf_dir: /etc/apache2/conf-enabled\n      mods_dir: /etc/apache2/mods-available\n      a2enmod:\n        - ssl\n      a2dismod: null\n  mpm_event: |\n    <IfModule mpm_event_module>\n      ServerLimit         1024\n      StartServers        32\n      MinSpareThreads     32\n      MaxSpareThreads     256\n      ThreadsPerChild     25\n      MaxRequestsPerChild 128\n      ThreadLimit         720\n    </IfModule>\n  wsgi_heat: |\n    {{- $portInt := tuple \"orchestration\" \"internal\" \"api\" $ | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    Listen {{ $portInt }}\n    <Directory /var/lib/openstack/bin>\n        Require all granted\n    </Directory>\n    <VirtualHost *:{{ $portInt }}>\n      ServerName {{ printf \"%s.%s.svc.%s\" \"heat-api\" .Release.Namespace .Values.endpoints.cluster_domain_suffix }}\n      WSGIDaemonProcess heat-api processes=1 threads=1 user=heat display-name=%{GROUP}\n      WSGIProcessGroup heat-api\n      WSGIScriptAlias /  /var/lib/openstack/bin/heat-wsgi-api\n      WSGIApplicationGroup %{GLOBAL}\n      WSGIPassAuthorization On\n      AllowEncodedSlashes On\n      SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n      ErrorLogFormat \"%{cu}t %M\"\n      ErrorLog /dev/stdout\n      CustomLog /dev/stdout combined env=!forwarded\n      CustomLog /dev/stdout proxy env=forwarded\n\n      SSLEngine on\n      SSLCertificateFile      /etc/heat/certs/tls.crt\n      SSLCertificateKeyFile   /etc/heat/certs/tls.key\n      SSLProtocol             all -SSLv3 -TLSv1 -TLSv1.1\n      SSLCipherSuite          
ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256\n      SSLHonorCipherOrder     on\n    </VirtualHost>\n\n  wsgi_cfn: |\n    {{- $portInt := tuple \"cloudformation\" \"internal\" \"api\" $ | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    Listen {{ $portInt }}\n    <Directory /var/lib/openstack/bin>\n        Require all granted\n    </Directory>\n    <VirtualHost *:{{ $portInt }}>\n      ServerName {{ printf \"%s.%s.svc.%s\" \"heat-api-cfn\" .Release.Namespace .Values.endpoints.cluster_domain_suffix }}\n      WSGIDaemonProcess heat-api-cfn processes=1 threads=1 user=heat display-name=%{GROUP}\n      WSGIProcessGroup heat-api-cfn\n      WSGIScriptAlias / /var/lib/openstack/bin/heat-wsgi-api-cfn\n      WSGIApplicationGroup %{GLOBAL}\n      WSGIPassAuthorization On\n      AllowEncodedSlashes On\n      SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n      ErrorLogFormat \"%{cu}t %M\"\n      ErrorLog /dev/stdout\n      CustomLog /dev/stdout combined env=!forwarded\n      CustomLog /dev/stdout proxy env=forwarded\n\n      SSLEngine on\n      SSLCertificateFile      /etc/heat/certs/tls.crt\n      SSLCertificateKeyFile   /etc/heat/certs/tls.key\n      SSLProtocol             all -SSLv3 -TLSv1 -TLSv1.1\n      SSLCipherSuite          ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256\n      SSLHonorCipherOrder     on\n    </VirtualHost>\n\n  heat:\n    clients_neutron:\n      ca_file: /etc/heat/certs/ca.crt\n    clients_cinder:\n      ca_file: /etc/heat/certs/ca.crt\n    clients_glance:\n      ca_file: 
/etc/heat/certs/ca.crt\n    clients_nova:\n      ca_file: /etc/heat/certs/ca.crt\n    clients_swift:\n      ca_file: /etc/heat/certs/ca.crt\n    ssl:\n      ca_file: /etc/heat/certs/ca.crt\n    keystone_authtoken:\n      cafile: /etc/heat/certs/ca.crt\n    clients:\n      ca_file: /etc/heat/certs/ca.crt\n    clients_keystone:\n      ca_file: /etc/heat/certs/ca.crt\n    oslo_messaging_rabbit:\n      ssl: true\n      ssl_ca_file: /etc/rabbitmq/certs/ca.crt\n      ssl_cert_file: /etc/rabbitmq/certs/tls.crt\n      ssl_key_file: /etc/rabbitmq/certs/tls.key\n\nnetwork:\n  api:\n    ingress:\n      annotations:\n        nginx.ingress.kubernetes.io/backend-protocol: \"https\"\n  cfn:\n    ingress:\n      annotations:\n        nginx.ingress.kubernetes.io/backend-protocol: \"https\"\n\npod:\n  security_context:\n    heat:\n      container:\n        heat_api:\n          readOnlyRootFilesystem: false\n          runAsUser: 0\n        heat_cfn:\n          readOnlyRootFilesystem: false\n          runAsUser: 0\n\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      heat:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      heat_trustee:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      heat_stack_user:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      test:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n    scheme:\n      default: https\n    port:\n      api:\n        default: 443\n  orchestration:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: heat-tls-api\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: https\n      service: https\n    port:\n      api:\n        public: 443\n  cloudformation:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: heat-tls-cfn\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: https\n      
service: https\n    port:\n      api:\n        public: 443\n  ingress:\n    port:\n      ingress:\n        default: 443\n  oslo_messaging:\n    port:\n      https:\n        default: 15680\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/horizon/2024.2-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    horizon_db_sync: quay.io/airshipit/horizon:2024.2-ubuntu_jammy\n    horizon: quay.io/airshipit/horizon:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/horizon/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    horizon_db_sync: quay.io/airshipit/horizon:2025.1-ubuntu_jammy\n    horizon: quay.io/airshipit/horizon:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/horizon/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    horizon_db_sync: quay.io/airshipit/horizon:2025.1-ubuntu_noble\n    horizon: quay.io/airshipit/horizon:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/horizon/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    horizon_db_sync: quay.io/airshipit/horizon:2025.2-ubuntu_noble\n    horizon: quay.io/airshipit/horizon:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/horizon/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    horizon:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      horizon:\n        custom.tld/key: \"value\"\n    tls:\n      dashboard_dashboard_public:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/horizon/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    horizon:\n      container:\n        horizon:\n          appArmorProfile:\n            type: RuntimeDefault\n    db_sync:\n      container:\n        horizon_db_sync:\n          appArmorProfile:\n            type: RuntimeDefault\n    test:\n      container:\n        horizon_test:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/horizon/gateway.yaml",
    "content": "# Gateway API overrides for Horizon.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  dashboard:\n    host_fqdn_override:\n      public:\n        host: horizon.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: horizon-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.dashboard.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: horizon-int\n              port: 80\n...\n"
  },
  {
    "path": "values_overrides/horizon/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    horizon_db_sync: quay.io/airshipit/horizon:2025.1-ubuntu_noble_loci\n    horizon: quay.io/airshipit/horizon:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/horizon/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    horizon_db_sync: quay.io/airshipit/horizon:2025.2-ubuntu_noble_loci\n    horizon: quay.io/airshipit/horizon:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/horizon/logo.yaml",
    "content": "---\nmanifests:\n  configmap_logo: true\nconf:\n  horizon:\n    branding:\n      favicon: |\n        AAABAAMAMDAAAAEAIACoJQAANgAAACAgAgABAAEAMAEAAN4lAAAQEAIAAQABALAAAAAOJwAAKAAA\n        ADAAAABgAAAAAQAgAAAAAAAAJAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAQxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f8AAAAAAAAAAAAAAABDGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/AAAAAEMY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        
/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAEMY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/AAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AABDGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MX7ZlDF+2ZQxftmUMX7ZlDF+2ZQxftmUMX7ZlDF+2ZQxftmUMX7ZlDF+2ZAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDF+2ZQxftmUMX7ZlD\n        F+2ZQxftmUMX7ZlDF+2ZQxftmUMX7ZlDF+2ZQxftmQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEMY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAABDGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/AAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABD\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAABDGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/AAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAABDGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/AAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAABDGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/AAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEMX7ZlDF+2ZQxftmUMX7ZlDF+2ZQxftmUMX7ZlDF+2Z\n        QxftmUMX7ZlDF+2ZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AABDF+2ZQxftmUMX7ZlDF+2ZQxftmUMX7ZlDF+2ZQxftmUMX7ZlDF+2ZQxftmUMY7f9DGO3/Qxjt\n        
/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/AAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAABDGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/wAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEMY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        
7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/wAAAABDGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY\n        7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/AAAAAAAAAAAAAAAAQxjt/0MY7f9DGO3/Qxjt\n        /0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/\n        Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9D\n        
GO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f9DGO3/Qxjt/0MY7f8AAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAP///////wAAwAAAAAADAACAAAAAAAEAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD///8AAA\n        AAAf///4AAAAAB////gAAAAAH///+AAAAAAf///4AAAA////////AAD///////8AAP///////wAA\n        AB////gAAAAAH///+AAAAAAf///4AAAAAB////gAAAAAH///+AAAAAAf///4AAAAAB////gAAAAA\n        H///+AAAAAAf///4AAAAAB////gAAAD///////8AAP///////wAA////////AAAAH///+AAAAAAf\n        ///4AAAAAB////gAAAAAH///+AAAAAAP///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAQAAwAAAAAADAAD/////\n        //8AACgAAAAgAAAAQAAAAAEAAQAAAAAAgAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAQxjtAD////x/\n        ///+/////////////////////////////////gAAf/4AAH8AAAAAAAAAAP4AAH/+AAB//gAAf/4A\n        AH/+AAB//gAAf/4AAH/+AAB/AAAAAAAAAAD+AAB//gAAf///////////////////////////////\n        /3////4////8wAAAA4AAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB//+AAf//gP//////////\n        Af//gAH//4AB//+AAf//gAH//4AB//+AAf//gAH//4D//////////wH//4AB//+AAAAAAAAAAAAA\n        AAAAAAAAAAAAAAAAAAAAgAAAAcAAAAMoAAAAEAAAACAAAAABAAEAAAAAAEAAAAAAAAAAAAAAAAIA\n        AAAAAAAAAAAAAEMY7QB//gAA//8AAP//AAD//wAA8A8AAAAAAADwDwAA8A8AAPAPAADwDwAAAAAA\n        APAPAAD//wAA//8AAP//AAB//gAAgAEAAAAAAAAAAAAAAAAAAA/wAAD//wAAD/AAAA/wAAAP8AAA\n        D/AAAP//AAAP8AAAAAAAAAAAAAAAAAAAgAEAAA==\n      logo: |\n        PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiIHN0YW5kYWxvbmU9Im5vIj8+Cjxz\n        dmcgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMj\n        IiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZlcnNpb249IjEuMSIgeG1sbnM6\n        
Y2M9Imh0dHA6Ly9jcmVhdGl2ZWNvbW1vbnMub3JnL25zIyIgeG1sbnM6ZGM9Imh0dHA6Ly9wdXJs\n        Lm9yZy9kYy9lbGVtZW50cy8xLjEvIiB2aWV3Qm94PSIwIDAgNjAwLjc3NTg5IDEwNi43MzczNyI+\n        CiA8ZGVmcz4KICA8Y2xpcFBhdGggaWQ9ImEiIGNsaXBQYXRoVW5pdHM9InVzZXJTcGFjZU9uVXNl\n        Ij4KICAgPHBhdGggZD0ibTAgNjEyaDc5MnYtNjEyaC03OTJ2NjEyeiIvPgogIDwvY2xpcFBhdGg+\n        CiA8L2RlZnM+CiA8ZyB0cmFuc2Zvcm09InRyYW5zbGF0ZSgtMzMuODk4IC01My4yNzkpIj4KICA8\n        ZyB0cmFuc2Zvcm09Im1hdHJpeCgxLjI1IDAgMCAtMS4yNSAtMTYwLjE0IDQ5MC42OSkiPgogICA8\n        ZyBjbGlwLXBhdGg9InVybCgjYSkiPgogICAgPGcgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoMjM0LjM3\n        IDM0OS45MykiPgogICAgIDxwYXRoIGQ9Im0wIDBoLTcxLjAzNmMtNC40NTggMC04LjEwNi0zLjY0\n        OC04LjEwNi04LjEwN3YtMTkuMTAyaDE5LjcwM3YzLjEyMWMwIDIuNDIyIDEuOTYzIDQuMzg1IDQu\n        Mzg1IDQuMzg1aDM5LjA3M2MyLjQyMSAwIDQuMzg0LTEuOTYzIDQuMzg0LTQuMzg1di0zLjEyMWgx\n        OS43MDR2MTkuMTAyYzAgNC40NTktMy42NDggOC4xMDctOC4xMDcgOC4xMDciIGZpbGw9IiNkYTFh\n        MzEiLz4KICAgIDwvZz4KICAgIDxnIHRyYW5zZm9ybT0idHJhbnNsYXRlKDIyMi43NyAyODguNjMp\n        Ij4KICAgICA8cGF0aCBkPSJtMCAwYzAtMi40MjItMS45NjMtNC4zODUtNC4zODUtNC4zODVoLTM5\n        LjA3MmMtMi40MjIgMC00LjM4NSAxLjk2My00LjM4NSA0LjM4NXYzLjEyMWgtMTkuNzAzdi0xOS4x\n        MDNjMC00LjQ1OCAzLjY0OC04LjEwNiA4LjEwNi04LjEwNmg3MS4wMzZjNC40NTkgMCA4LjEwNiAz\n        LjY0OCA4LjEwNiA4LjEwNnYxOS4xMDNoLTE5LjcwM3YtMy4xMjF6IiBmaWxsPSIjZGExYTMxIi8+\n        CiAgICA8L2c+CiAgICA8cGF0aCBkPSJtMTc0LjkzIDMxNy4wOWgtMTkuNzAzdi0xOS43MDNoMTku\n        NzAzdjE5LjcwM3oiIGZpbGw9IiNkYTFhMzEiLz4KICAgIDxwYXRoIGQ9Im0yNDIuNDggMzE3LjA5\n        aC0xOS43MDN2LTE5LjcwM2gxOS43MDN2MTkuNzAzeiIgZmlsbD0iI2RhMWEzMSIvPgogICAgPGcg\n        dHJhbnNmb3JtPSJ0cmFuc2xhdGUoNjMwLjMxIDI4Ni41KSI+CiAgICAgPHBhdGggZD0ibTAgMGMt\n        Mi4zMDUgMC00LjE4MSAxLjg3NS00LjE4MSA0LjE4MSAwIDIuMzA1IDEuODc2IDQuMTggNC4xODEg\n        NC4xOHM0LjE4MS0xLjg3NSA0LjE4MS00LjE4YzAtMi4zMDYtMS44NzYtNC4xODEtNC4xODEtNC4x\n        ODFtNS41NDMgNC4xODFjMCAzLjA1Ni0yLjQ4NyA1LjU0Mi01LjU0MyA1LjU0MnMtNS41NDMtMi40\n        
ODYtNS41NDMtNS41NDJjMC0zLjA1NyAyLjQ4Ny01LjU0MyA1LjU0My01LjU0M3M1LjU0MyAyLjQ4\n        NiA1LjU0MyA1LjU0MyIgZmlsbD0iIzRlNDY0MCIvPgogICAgPC9nPgogICAgPGcgdHJhbnNmb3Jt\n        PSJ0cmFuc2xhdGUoNjMxLjI1IDI5MS42NikiPgogICAgIDxwYXRoIGQ9Im0wIDBjMC0wLjQ4OS0w\n        LjMxLTAuODE1LTAuODg1LTAuODE1aC0wLjgzdjEuNjM4aDAuODA3YzAuNTc1IDAgMC45MDgtMC4y\n        NzIgMC45MDgtMC44MTV2LTAuMDA4em0xLjEzMyAwLjAzMXYwLjAzMWMwIDAuNTM2LTAuMTYzIDAu\n        OTYyLTAuNDU4IDEuMjU3LTAuMzQyIDAuMzQyLTAuODQ1IDAuNTItMS40NzQgMC41MmgtMi4wNHYt\n        NS40MzFoMS4xMjR2MS43ODRoMC43MDZsMS4wMDEtMS43ODRoMS4yODhsLTEuMTU2IDIuMDE3YzAu\n        NTk4IDAuMjY0IDEuMDA5IDAuNzg0IDEuMDA5IDEuNjA2IiBmaWxsPSIjNGU0NjQwIi8+CiAgICA8\n        L2c+CiAgICA8ZyB0cmFuc2Zvcm09InRyYW5zbGF0ZSgyNzEuNjIgMzA3LjAyKSI+CiAgICAgPHBh\n        dGggZD0ibTAgMHYwLjUwMWMwIDguMjcgNC4yNiAxNC4zNjYgMTAuODU4IDE0LjM2NiA2LjQzMSAw\n        IDExLjAyNS02LjE4MSAxMS4wMjUtMTQuNDQ5di0wLjUwMWMwLTguMjY5LTQuMjYtMTQuNDQ5LTEw\n        Ljg1OC0xNC40NDktNi40MzIgMC0xMS4wMjUgNi4yNjQtMTEuMDI1IDE0LjUzMm0zMS4zMi0wLjE2\n        NnYwLjc1MmMwIDEzLjExMy04LjYwMiAyMi42MzQtMjAuMjk1IDIyLjYzNC0xMS43NzcgMC0yMC40\n        NjMtOS42ODktMjAuNDYzLTIyLjcxOXYtMC43NTFjMC0xMy4xMTMgOC42MDMtMjIuNTUxIDIwLjI5\n        Ni0yMi41NTEgMTEuNzc2IDAgMjAuNDYyIDkuNjA1IDIwLjQ2MiAyMi42MzUiIGZpbGw9IiM0ZTQ2\n        NDAiLz4KICAgIDwvZz4KICAgIDxnIHRyYW5zZm9ybT0idHJhbnNsYXRlKDMxNy41NSAzMDYuODYp\n        Ij4KICAgICA8cGF0aCBkPSJtMCAwdjAuODM1YzAgOC42ODYgNC44NDUgMTQuMTk4IDEwLjUyMyAx\n        NC4xOTggNS42OCAwIDEwLjI3NC01LjQyOSAxMC4yNzQtMTQuMjgxdi0wLjc1MmMwLTguODU0LTQu\n        NTExLTE0LjE5OS0xMC4yNzQtMTQuMTk5LTUuNjc4IDAtMTAuNTIzIDUuNTEzLTEwLjUyMyAxNC4x\n        OTltMzAuMzE5LTAuMTY4djEuMTdjMCAxNC45NTEtOC4zNTMgMjIuMzg0LTE3LjIwNiAyMi4zODQt\n        Ni4zNDggMC0xMC4xOS0zLjY3NS0xMi45NDYtNy45MzV2Ny4xaC05LjQzOHYtNTYuODc5aDkuNDM4\n        djE5LjI5NGMyLjY3My0zLjkyNSA2LjUxNS03LjUxNyAxMi45NDYtNy41MTcgOC45MzYgMCAxNy4y\n        MDYgNy41MTcgMTcuMjA2IDIyLjM4MyIgZmlsbD0iIzRlNDY0MCIvPgogICAgPC9nPgogICAgPGcg\n        
dHJhbnNmb3JtPSJ0cmFuc2xhdGUoMzcxLjg5IDMyMi4zMSkiPgogICAgIDxwYXRoIGQ9Im0wIDBj\n        NS41MTMgMCA4LjY4Ni01LjA5NCA5LjE4OC0xMi4xMWgtMTguNzA5YzAuNjY4IDcuNTE3IDQuMzQz\n        IDEyLjExIDkuNTIxIDEyLjExbTE2Ljg3MS0zMS4yMzctNC45MjggNS44NDhjLTIuOTIzLTIuODQt\n        Ni4wMTMtNC42NzgtMTAuMTg5LTQuNjc4LTUuNzYzIDAtMTAuMzU3IDQuMDA5LTExLjE5MiAxMS40\n        NDJoMjcuNzI5YzAuMDgzIDEuMzM2IDAuMDgzIDIuNjc0IDAuMDgzIDMuMjU4IDAgMTMuMjc5LTYu\n        MzQ3IDIzLjMwMy0xOC4zNzQgMjMuMzAzLTEwLjg1OCAwLTE4Ljg3Ni05LjUyMi0xOC44NzYtMjIu\n        ODAzdi0wLjY2N2MwLTEzLjg2NSA4Ljg1My0yMi41NTEgMjAuMjEzLTIyLjU1MSA2LjU5OCAwIDEx\n        LjUyNiAyLjU4OSAxNS41MzQgNi44NDgiIGZpbGw9IiM0ZTQ2NDAiLz4KICAgIDwvZz4KICAgIDxn\n        IHRyYW5zZm9ybT0idHJhbnNsYXRlKDQxNy45OCAzMzAuMjQpIj4KICAgICA8cGF0aCBkPSJtMCAw\n        Yy02LjAxMyAwLTkuNjA1LTMuNDI1LTEyLjExMS03LjM1djYuNTE1aC05LjQzOHYtNDQuMjY3aDku\n        NDM4djI2LjgxYzAgNi4wOTcgMy40MjUgOS43NzIgOC4zNTMgOS43NzIgNS4wMTEgMCA3LjkzNC0z\n        LjQyMyA3LjkzNC05LjYwNHYtMjYuOTc4aDkuNDM4djI5LjY1YzAgOS40MzgtNS4wOTUgMTUuNDUy\n        LTEzLjYxNCAxNS40NTIiIGZpbGw9IiM0ZTQ2NDAiLz4KICAgIDwvZz4KICAgIDxnIHRyYW5zZm9y\n        bT0idHJhbnNsYXRlKDQ2OS42NCAyOTcuNzUpIj4KICAgICA8cGF0aCBkPSJtMCAwdjAuMDgzYzAg\n        Ni45MzMtNS4xNzkgMTAuMTA1LTEyLjAyNyAxMy4xOTYtNS4xNzkgMi4zMzktOC42ODYgMy44NDIt\n        OC42ODYgNi43NjV2MC4xNjhjMCAyLjQyMiAyLjA4NyA0LjM0MiA1LjU5NSA0LjM0MnM3LjE4My0x\n        LjY3IDEwLjUyNC0zLjkyNWw0LjAwOSA2LjkzM2MtNC4xNzYgMy4wMDYtOS40MzggNC43Ni0xNC4z\n        NjYgNC43Ni04LjI2OCAwLTE0LjYxNi01LjAxMS0xNC42MTYtMTIuOTQ1di0wLjE2OGMwLTcuMjY2\n        IDUuNTEzLTEwLjI3MyAxMi4xMTEtMTMuMTEyIDUuMjYxLTIuMjU2IDguNjg2LTMuNjc1IDguNjg2\n        LTYuODQ5di0wLjA4M2MwLTIuNzU3LTIuMjU1LTQuODQ1LTYuMTgxLTQuODQ1LTMuOTI1IDAtOC4x\n        ODUgMS45MjItMTIuMTkzIDUuMTc4bC00LjQyNy02Ljc2NWM1LjA5NS00LjI1OSAxMS4xOTItNi4x\n        OCAxNi40NTMtNi4xOCA4LjUyIDAgMTUuMTE4IDQuNzYxIDE1LjExOCAxMy40NDciIGZpbGw9IiM0\n        ZTQ2NDAiLz4KICAgIDwvZz4KICAgIDxnIHRyYW5zZm9ybT0idHJhbnNsYXRlKDQ5OC44NiAyODYu\n        
NCkiPgogICAgIDxwYXRoIGQ9Im0wIDB2Ny43NjhjLTEuNTg3LTAuODM2LTMuMjU4LTEuMjUzLTUu\n        MDk1LTEuMjUzLTMuMDA3IDAtNC43NjEgMS40MTktNC43NjEgNC44NDR2MjMuNTUzaDkuOTM5djgu\n        MTAyaC05LjkzOXYxMS45NDNoLTkuNDM4di0xMS45NDNoLTQuODQ0bDEuNTUtOC4xMDJoMy4yOTR2\n        LTI1LjQ3NGMwLTguNjAzIDQuOTI4LTExLjM1OSAxMS40NDMtMTEuMzU5IDMuMTc0IDAgNS42Nzkg\n        MC43NTEgNy44NTEgMS45MjEiIGZpbGw9IiM0ZTQ2NDAiLz4KICAgIDwvZz4KICAgIDxnIHRyYW5z\n        Zm9ybT0idHJhbnNsYXRlKDUzMC4zNSAzMDAuMDEpIj4KICAgICA8cGF0aCBkPSJtMCAwYzAtNS4w\n        MTEtNC4xNzYtOC42ODYtOS42MDUtOC42MDMtNC4wOTIgMC4wODMtNy4xODIgMi41OS03LjE4MiA3\n        LjAxNnYwLjE2OGMwIDQuNjc2IDMuMTczIDcuNDMzIDguNjAyIDcuNDMzIDMuMTc0IDAgNi4wMTQt\n        MC42NjkgOC4xODUtMS41ODd2LTQuNDI3em00LjY3NyAyNS4zOTFjLTIuODM5IDIuOTIzLTcuMjY2\n        IDQuNTEtMTMuMDI5IDQuNTEtNS45MyAwLTEwLjM1Ny0xLjQyMS0xNC42MTctMy41MDlsMi42NzMt\n        Ny41MTZjMi45MjQgMS4zMzYgNi40MzIgMi42NzMgMTAuNjkxIDIuNjczIDYuMDE0IDAgOS41MjIt\n        My4wOTEgOS41MjItOS4wMnYtMS44MzhjLTMuMDA3IDEuMDAzLTYuMDE0IDEuNzUzLTEwLjE5IDEu\n        NzUzLTkuMzU0IDAtMTUuNzAyLTQuNjc2LTE1LjcwMi0xNC4xOTh2LTAuNDE3YzAtOC42MDMgNi4x\n        ODEtMTMuNTMxIDEzLjUzMS0xMy41MzEgNS43NjMgMCA5LjY4OCAyLjUwNiAxMi4yNzcgNi4yNjR2\n        LTUuNDI5aDkuMjcxdjI3LjQ3OWMwIDUuNTEyLTEuNTA0IDkuODU2LTQuNDI3IDEyLjc3OSIgZmls\n        bD0iIzRlNDY0MCIvPgogICAgPC9nPgogICAgPGcgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoNTc5LjU3\n        IDI5MC45MSkiPgogICAgIDxwYXRoIGQ9Im0wIDAtNS4wOTQgNi4yNjRjLTIuNTktMi41ODktNS4x\n        NzktNC41MS05LjAyMS00LjUxLTYuNDMxIDAtMTAuOTQxIDUuNzYzLTEwLjk0MSAxNC4zNjV2MC40\n        MThjMCA4LjM1MyA0LjUxIDE0LjI4MiAxMC42OTEgMTQuMjgyIDQuMDA5IDAgNi41OTgtMi4wMDQg\n        OC45MzYtNC40MjdsNS4yNjMgNi45MzNjLTMuNjc1IDMuNjc1LTcuODUyIDYuMDE0LTE0LjE5OSA2\n        LjAxNC0xMS42MSAwLTIwLjIxMy05LjUyMS0yMC4yMTMtMjIuNzE5di0wLjgzNWMwLTEzLjE5NiA4\n        LjM1My0yMi40NjYgMTkuODc5LTIyLjQ2NiA2Ljc2NSAwIDExLjE5MSAyLjc1NSAxNC42OTkgNi42\n        ODEiIGZpbGw9IiM0ZTQ2NDAiLz4KICAgIDwvZz4KICAgIDxnIHRyYW5zZm9ybT0idHJhbnNsYXRl\n        
KDYwOC40NiAzMDguMTIpIj4KICAgICA8cGF0aCBkPSJtMCAwLTYuNjI1LTcuMTIxIDguODg2LTE1\n        Ljg1NGgxMC45NDFsLTEzLjIwMiAyMi45NzV6IiBmaWxsPSIjNGU0NjQwIi8+CiAgICA8L2c+CiAg\n        ICA8ZyB0cmFuc2Zvcm09InRyYW5zbGF0ZSg2MTAuMDUgMzI5LjQxKSI+CiAgICAgPHBhdGggZD0i\n        bTAgMC0xNS4xMTctMTkuNDYxdjM1LjQ5N2gtOS40Mzh2LTYwLjMwM2g5LjQzOHYxMi43NzlsMjYu\n        MzA5IDMxLjQ4OGgtMTEuMTkyeiIgZmlsbD0iIzRlNDY0MCIvPgogICAgPC9nPgogICA8L2c+CiAg\n        PC9nPgogPC9nPgo8L3N2Zz4K\n      logo_splash: |\n        PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiIHN0YW5kYWxvbmU9Im5vIj8+Cjxz\n        dmcgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMj\n        IiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZlcnNpb249IjEuMSIgeG1sbnM6\n        Y2M9Imh0dHA6Ly9jcmVhdGl2ZWNvbW1vbnMub3JnL25zIyIgeG1sbnM6ZGM9Imh0dHA6Ly9wdXJs\n        Lm9yZy9kYy9lbGVtZW50cy8xLjEvIiB2aWV3Qm94PSIwIDAgNTg2LjY1NTc2IDI4My4wOTIxMyI+\n        CiA8ZGVmcz4KICA8Y2xpcFBhdGggaWQ9ImIiIGNsaXBQYXRoVW5pdHM9InVzZXJTcGFjZU9uVXNl\n        Ij4KICAgPHBhdGggZD0ibTAgNjEyaDc5MnYtNjEyaC03OTJ2NjEyeiIvPgogIDwvY2xpcFBhdGg+\n        CiAgPGNsaXBQYXRoIGlkPSJhIiBjbGlwUGF0aFVuaXRzPSJ1c2VyU3BhY2VPblVzZSI+CiAgIDxw\n        YXRoIGQ9Im0zMzEuMDIgNDI0Ljg3aDExNi4yNHYtMTEzLjc2aC0xMTYuMjR2MTEzLjc2eiIvPgog\n        IDwvY2xpcFBhdGg+CiA8L2RlZnM+CiA8ZyB0cmFuc2Zvcm09InRyYW5zbGF0ZSgtLjk1Nzg0IC0y\n        LjI0NDcpIj4KICA8ZyB0cmFuc2Zvcm09Im1hdHJpeCgxLjI1IDAgMCAtMS4yNSAtMjAxLjg2IDUz\n        My4zNCkiPgogICA8ZyBjbGlwLXBhdGg9InVybCgjYikiPgogICAgPGcgdHJhbnNmb3JtPSJ0cmFu\n        c2xhdGUoNjI0LjYyIDIxNS45NikiPgogICAgIDxwYXRoIGQ9Im0wIDBjLTIuODk2IDAtNS4yNTIg\n        Mi4zNTYtNS4yNTIgNS4yNTIgMCAyLjg5NyAyLjM1NiA1LjI1MiA1LjI1MiA1LjI1MnM1LjI1Mi0y\n        LjM1NSA1LjI1Mi01LjI1MmMwLTIuODk2LTIuMzU2LTUuMjUyLTUuMjUyLTUuMjUybTYuOTYzIDUu\n        MjUyYzAgMy44NC0zLjEyMyA2Ljk2NC02Ljk2MyA2Ljk2NHMtNi45NjMtMy4xMjQtNi45NjMtNi45\n        NjQgMy4xMjMtNi45NjMgNi45NjMtNi45NjMgNi45NjMgMy4xMjMgNi45NjMgNi45NjMiIGZpbGw9\n        
IiM0ZTQ2NDAiLz4KICAgIDwvZz4KICAgIDxnIHRyYW5zZm9ybT0idHJhbnNsYXRlKDYyNS44IDIy\n        Mi40MykiPgogICAgIDxwYXRoIGQ9Im0wIDBjMC0wLjYxNC0wLjM5LTEuMDI0LTEuMTEyLTEuMDI0\n        aC0xLjA0MnYyLjA1OGgxLjAxM2MwLjcyMiAwIDEuMTQxLTAuMzQyIDEuMTQxLTEuMDI0di0wLjAx\n        em0xLjQyNCAwLjAzOXYwLjAzOWMwIDAuNjczLTAuMjA1IDEuMjA5LTAuNTc2IDEuNTgtMC40Mjkg\n        MC40MjgtMS4wNjIgMC42NTItMS44NTIgMC42NTJoLTIuNTYzdi02LjgyM2gxLjQxM3YyLjI0Mmgw\n        Ljg4NmwxLjI1OC0yLjI0MmgxLjYxOWwtMS40NTMgMi41MzVjMC43NTEgMC4zMzEgMS4yNjggMC45\n        ODQgMS4yNjggMi4wMTciIGZpbGw9IiM0ZTQ2NDAiLz4KICAgIDwvZz4KICAgIDxnIHRyYW5zZm9y\n        bT0idHJhbnNsYXRlKDE3NC4xMSAyNDEuNzQpIj4KICAgICA8cGF0aCBkPSJtMCAwdjAuNjI5YzAg\n        MTAuMzkgNS4zNTIgMTguMDQ4IDEzLjY0MSAxOC4wNDggOC4wNzkgMCAxMy44NTEtNy43NjUgMTMu\n        ODUxLTE4LjE1MnYtMC42M2MwLTEwLjM4Ny01LjM1Mi0xOC4xNTItMTMuNjQxLTE4LjE1Mi04LjA4\n        IDAtMTMuODUxIDcuODY5LTEzLjg1MSAxOC4yNTdtMzkuMzQ4LTAuMjA5djAuOTQ1YzAgMTYuNDc0\n        LTEwLjgwNyAyOC40MzUtMjUuNDk3IDI4LjQzNS0xNC43OTYgMC0yNS43MDgtMTIuMTcyLTI1Ljcw\n        OC0yOC41NDJ2LTAuOTQzYzAtMTYuNDc0IDEwLjgwOC0yOC4zMzEgMjUuNDk4LTI4LjMzMSAxNC43\n        OTUgMCAyNS43MDcgMTIuMDY2IDI1LjcwNyAyOC40MzYiIGZpbGw9IiM0ZTQ2NDAiLz4KICAgIDwv\n        Zz4KICAgIDxnIHRyYW5zZm9ybT0idHJhbnNsYXRlKDIzMS44MSAyNDEuNTMpIj4KICAgICA8cGF0\n        aCBkPSJtMCAwdjEuMDQ5YzAgMTAuOTEzIDYuMDg2IDE3LjgzNyAxMy4yMjEgMTcuODM3czEyLjkw\n        Ni02LjgyIDEyLjkwNi0xNy45NDF2LTAuOTQ1YzAtMTEuMTIzLTUuNjY2LTE3LjgzOS0xMi45MDYt\n        MTcuODM5LTcuMTM1IDAtMTMuMjIxIDYuOTI3LTEzLjIyMSAxNy44MzltMzguMDktMC4yMTF2MS40\n        NjljMCAxOC43ODQtMTAuNDk0IDI4LjEyMi0yMS42MTYgMjguMTIyLTcuOTc1IDAtMTIuODAyLTQu\n        NjE3LTE2LjI2NC05Ljk2OXY4LjkyaC0xMS44NTd2LTcxLjQ1OGgxMS44NTd2MjQuMjM5YzMuMzU4\n        LTQuOTMxIDguMTg1LTkuNDQzIDE2LjI2NC05LjQ0MyAxMS4yMjcgMCAyMS42MTYgOS40NDMgMjEu\n        NjE2IDI4LjEyIiBmaWxsPSIjNGU0NjQwIi8+CiAgICA8L2c+CiAgICA8ZyB0cmFuc2Zvcm09InRy\n        YW5zbGF0ZSgzMDAuMDkgMjYwLjk0KSI+CiAgICAgPHBhdGggZD0ibTAgMGM2LjkyNSAwIDEwLjkx\n        
Mi02LjQgMTEuNTQyLTE1LjIxNGgtMjMuNTA0YzAuODQgOS40NDMgNS40NTcgMTUuMjE0IDExLjk2\n        MiAxNS4yMTRtMjEuMTk2LTM5LjI0NC02LjE5MSA3LjM0N2MtMy42NzMtMy41NjgtNy41NTUtNS44\n        NzctMTIuODAxLTUuODc3LTcuMjQgMC0xMy4wMTIgNS4wMzctMTQuMDYxIDE0LjM3NWgzNC44MzZj\n        MC4xMDUgMS42NzkgMC4xMDUgMy4zNTkgMC4xMDUgNC4wOTMgMCAxNi42ODMtNy45NzQgMjkuMjc2\n        LTIzLjA4NCAyOS4yNzYtMTMuNjQxIDAtMjMuNzE0LTExLjk2Mi0yMy43MTQtMjguNjQ3di0wLjgz\n        OGMwLTE3LjQxOSAxMS4xMjItMjguMzMxIDI1LjM5My0yOC4zMzEgOC4yOSAwIDE0LjQ4IDMuMjUy\n        IDE5LjUxNyA4LjYwMiIgZmlsbD0iIzRlNDY0MCIvPgogICAgPC9nPgogICAgPGcgdHJhbnNmb3Jt\n        PSJ0cmFuc2xhdGUoMzU3Ljk5IDI3MC45MSkiPgogICAgIDxwYXRoIGQ9Im0wIDBjLTcuNTU1IDAt\n        MTIuMDY3LTQuMzAzLTE1LjIxNS05LjIzNHY4LjE4NWgtMTEuODU3di01NS42MTNoMTEuODU3djMz\n        LjY4MmMwIDcuNjYgNC4zMDMgMTIuMjc3IDEwLjQ5MyAxMi4yNzcgNi4yOTYgMCA5Ljk2OC00LjMw\n        MiA5Ljk2OC0xMi4wNjZ2LTMzLjg5M2gxMS44NTd2MzcuMjQ5YzAgMTEuODU3LTYuNDAxIDE5LjQx\n        My0xNy4xMDMgMTkuNDEzIiBmaWxsPSIjNGU0NjQwIi8+CiAgICA8L2c+CiAgICA8ZyB0cmFuc2Zv\n        cm09InRyYW5zbGF0ZSg0MjIuODkgMjMwLjA5KSI+CiAgICAgPHBhdGggZD0ibTAgMHYwLjEwNWMw\n        IDguNzA5LTYuNTA2IDEyLjY5NS0xNS4xMSAxNi41NzgtNi41MDYgMi45MzktMTAuOTEzIDQuODI2\n        LTEwLjkxMyA4LjQ5OXYwLjIxMWMwIDMuMDQzIDIuNjIzIDUuNDU1IDcuMDMgNS40NTVzOS4wMjQt\n        Mi4wOTkgMTMuMjIxLTQuOTMxbDUuMDM3IDguNzA5Yy01LjI0NiAzLjc3Ny0xMS44NTcgNS45OC0x\n        OC4wNDggNS45OC0xMC4zODcgMC0xOC4zNjItNi4yOTUtMTguMzYyLTE2LjI2M3YtMC4yMTFjMC05\n        LjEyNyA2LjkyNS0xMi45MDYgMTUuMjE1LTE2LjQ3MiA2LjYxLTIuODM0IDEwLjkxMi00LjYxNyAx\n        MC45MTItOC42MDV2LTAuMTA0YzAtMy40NjMtMi44MzMtNi4wODctNy43NjUtNi4wODctNC45MzEg\n        MC0xMC4yODMgMi40MTQtMTUuMzE5IDYuNTA1bC01LjU2Mi04LjQ5OGM2LjQwMS01LjM1MSAxNC4w\n        NjEtNy43NjUgMjAuNjcxLTcuNzY1IDEwLjcwNCAwIDE4Ljk5MyA1Ljk4MiAxOC45OTMgMTYuODk0\n        IiBmaWxsPSIjNGU0NjQwIi8+CiAgICA8L2c+CiAgICA8ZyB0cmFuc2Zvcm09InRyYW5zbGF0ZSg0\n        NTkuNiAyMTUuODIpIj4KICAgICA8cGF0aCBkPSJtMCAwdjkuNzU5Yy0xLjk5NC0xLjA1LTQuMDky\n        
LTEuNTc0LTYuNDAxLTEuNTc0LTMuNzc4IDAtNS45ODEgMS43ODMtNS45ODEgNi4wODZ2MjkuNTg5\n        aDEyLjQ4N3YxMC4xNzloLTEyLjQ4N3YxNS4wMDVoLTExLjg1N3YtMTUuMDA1aC02LjA4NmwxLjk0\n        OC0xMC4xNzloNC4xMzh2LTMyLjAwM2MwLTEwLjgwOCA2LjE5MS0xNC4yNzEgMTQuMzc2LTE0LjI3\n        MSAzLjk4NyAwIDcuMTM0IDAuOTQ1IDkuODYzIDIuNDE0IiBmaWxsPSIjNGU0NjQwIi8+CiAgICA8\n        L2c+CiAgICA8ZyB0cmFuc2Zvcm09InRyYW5zbGF0ZSg0OTkuMTYgMjMyLjkyKSI+CiAgICAgPHBh\n        dGggZD0ibTAgMGMwLTYuMjk1LTUuMjQ2LTEwLjkxMi0xMi4wNjctMTAuODA4LTUuMTQxIDAuMTA1\n        LTkuMDIzIDMuMjU0LTkuMDIzIDguODE0djAuMjExYzAgNS44NzUgMy45ODcgOS4zMzkgMTAuODA3\n        IDkuMzM5IDMuOTg4IDAgNy41NTUtMC44NDEgMTAuMjgzLTEuOTk0di01LjU2MnptNS44NzYgMzEu\n        ODk5Yy0zLjU2NyAzLjY3Mi05LjEyOCA1LjY2Ni0xNi4zNjggNS42NjYtNy40NSAwLTEzLjAxMi0x\n        Ljc4NS0xOC4zNjQtNC40MDhsMy4zNTgtOS40NDNjMy42NzMgMS42NzkgOC4wOCAzLjM1OSAxMy40\n        MzEgMy4zNTkgNy41NTYgMCAxMS45NjItMy44ODQgMTEuOTYyLTExLjMzM3YtMi4zMDljLTMuNzc3\n        IDEuMjYtNy41NTQgMi4yMDMtMTIuODAxIDIuMjAzLTExLjc1MiAwLTE5LjcyNy01Ljg3NS0xOS43\n        MjctMTcuODM3di0wLjUyNWMwLTEwLjgwNyA3Ljc2NS0xNi45OTggMTYuOTk5LTE2Ljk5OCA3LjI0\n        IDAgMTIuMTcyIDMuMTQ3IDE1LjQyNCA3Ljg2OXYtNi44MmgxMS42NDd2MzQuNTIyYzAgNi45MjQt\n        MS44ODggMTIuMzgyLTUuNTYxIDE2LjA1NCIgZmlsbD0iIzRlNDY0MCIvPgogICAgPC9nPgogICAg\n        PGcgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoNTYxIDIyMS40OCkiPgogICAgIDxwYXRoIGQ9Im0wIDAt\n        Ni40IDcuODY5Yy0zLjI1My0zLjI1Mi02LjUwNi01LjY2Ni0xMS4zMzItNS42NjYtOC4wODEgMC0x\n        My43NDYgNy4yNC0xMy43NDYgMTguMDQ4djAuNTI1YzAgMTAuNDk0IDUuNjY1IDE3Ljk0MyAxMy40\n        MzEgMTcuOTQzIDUuMDM2IDAgOC4yODktMi41MTkgMTEuMjI3LTUuNTYybDYuNjExIDguNzFjLTQu\n        NjE3IDQuNjE3LTkuODY0IDcuNTU1LTE3LjgzOCA3LjU1NS0xNC41ODYgMC0yNS4zOTMtMTEuOTYx\n        LTI1LjM5My0yOC41NDJ2LTEuMDQ5YzAtMTYuNTc5IDEwLjQ5My0yOC4yMjUgMjQuOTczLTI4LjIy\n        NSA4LjQ5OSAwIDE0LjA2IDMuNDYxIDE4LjQ2NyA4LjM5NCIgZmlsbD0iIzRlNDY0MCIvPgogICAg\n        PC9nPgogICAgPGcgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoNTk3LjI5IDI0My4xMSkiPgogICAgIDxw\n        
YXRoIGQ9Im0wIDAtOC4zMjMtOC45NDYgMTEuMTY0LTE5LjkxOGgxMy43NDVsLTE2LjU4NiAyOC44\n        NjR6IiBmaWxsPSIjNGU0NjQwIi8+CiAgICA8L2c+CiAgICA8ZyB0cmFuc2Zvcm09InRyYW5zbGF0\n        ZSg1OTkuMjkgMjY5Ljg2KSI+CiAgICAgPHBhdGggZD0ibTAgMC0xOC45OTItMjQuNDV2NDQuNTk2\n        aC0xMS44NTd2LTc1Ljc1OWgxMS44NTd2MTYuMDU0bDMzLjA1MyAzOS41NTloLTE0LjA2MXoiIGZp\n        bGw9IiM0ZTQ2NDAiLz4KICAgIDwvZz4KICAgIDxnIG9wYWNpdHk9IjAuOTgiIGNsaXAtcGF0aD0i\n        dXJsKCNhKSI+CiAgICAgPGcgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoNDM2LjQ2IDQyNC44NykiPgog\n        ICAgICA8cGF0aCBkPSJtMCAwaC05NC42MzhjLTUuOTQgMC0xMC44LTQuODYtMTAuOC0xMC44di0y\n        NS40NWgyNi4yNXY0LjE1OWMwIDMuMjI2IDIuNjE2IDUuODQxIDUuODQyIDUuODQxaDUyLjA1NWMz\n        LjIyNiAwIDUuODQxLTIuNjE1IDUuODQxLTUuODQxdi00LjE1OWgyNi4yNXYyNS40NWMwLjAwMiA1\n        Ljk0LTQuODU4IDEwLjgtMTAuNzk4IDEwLjgiIGZpbGw9IiNlZDE4NDQiLz4KICAgICA8L2c+CiAg\n        ICAgPGcgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoNDIxLjAxIDM0My4yKSI+CiAgICAgIDxwYXRoIGQ9\n        Im0wIDBjMC0zLjIyNi0yLjYxNS01Ljg0Mi01Ljg0MS01Ljg0MmgtNTIuMDU1Yy0zLjIyNiAwLTUu\n        ODQyIDIuNjE2LTUuODQyIDUuODQydjQuMTU4aC0yNi4yNXYtMjUuNDQ5YzAtNS45NDEgNC44Ni0x\n        MC44IDEwLjgtMTAuOGg5NC42MzhjNS45NCAwIDEwLjggNC44NTkgMTAuOCAxMC44djI1LjQ0OWgt\n        MjYuMjV2LTQuMTU4eiIgZmlsbD0iI2VkMTg0NCIvPgogICAgIDwvZz4KICAgICA8cGF0aCBkPSJt\n        MzU3LjI4IDM4MS4xMmgtMjYuMjV2LTI2LjI1aDI2LjI1djI2LjI1eiIgZmlsbD0iI2VkMTg0NCIv\n        PgogICAgIDxwYXRoIGQ9Im00NDcuMjYgMzgxLjEyaC0yNi4yNXYtMjYuMjVoMjYuMjV2MjYuMjV6\n        IiBmaWxsPSIjZWQxODQ0Ii8+CiAgICA8L2c+CiAgIDwvZz4KICA8L2c+CiA8L2c+Cjwvc3ZnPgo=\n...\n"
  },
  {
    "path": "values_overrides/horizon/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\nnetwork_policy:\n  horizon:\n    ingress:\n      - from:\n        - podSelector:\n            matchLabels:\n              application: horizon\n      - from:\n        - podSelector:\n            matchLabels:\n              application: prometheus-openstack-exporter\n      - from:\n        - podSelector:\n            matchLabels:\n              application: ingress\n        ports:\n          - port: 80\n            protocol: TCP\n          - port: 443\n            protocol: TCP\n    egress:\n      - to:\n        - podSelector:\n            matchLabels:\n              application: neutron\n      - to:\n        - podSelector:\n            matchLabels:\n              application: nova\n      - to:\n        - podSelector:\n            matchLabels:\n              application: glance\n      - to:\n        - podSelector:\n            matchLabels:\n              application: cinder\n      - to:\n        - podSelector:\n            matchLabels:\n              application: keystone\n      - to:\n        - podSelector:\n            matchLabels:\n              application: heat\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/32\n        ports:\n          - protocol: TCP\n            port: %%%REPLACE_API_PORT%%%\n...\n"
  },
  {
    "path": "values_overrides/horizon/tls.yaml",
    "content": "---\nnetwork:\n  dashboard:\n    ingress:\n      annotations:\n        nginx.ingress.kubernetes.io/backend-protocol: \"https\"\nconf:\n  software:\n    apache2:\n      a2enmod:\n        - headers\n        - rewrite\n        - ssl\n  horizon:\n    apache: |\n      LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" combined\n      LogFormat \"%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" proxy\n\n      SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n      CustomLog /dev/stdout combined env=!forwarded\n      CustomLog /dev/stdout proxy env=forwarded\n\n      <VirtualHost *:80>\n        ServerName horizon-int.openstack.svc.cluster.local\n        RewriteEngine On\n        RewriteCond %{HTTPS} off\n        RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R,L]\n      </Virtualhost>\n\n      <VirtualHost *:{{ tuple \"dashboard\" \"internal\" \"web\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}>\n        ServerName horizon-int.openstack.svc.cluster.local\n        WSGIScriptReloading On\n        WSGIDaemonProcess horizon-http processes=5 threads=1 user=horizon group=horizon display-name=%{GROUP} python-path=/var/lib/kolla/venv/lib/python2.7/site-packages\n        WSGIProcessGroup horizon-http\n        WSGIScriptAlias / /var/www/cgi-bin/horizon/django.wsgi\n        WSGIPassAuthorization On\n\n        RewriteEngine On\n        RewriteCond %{REQUEST_METHOD} !^(POST|PUT|GET|DELETE|PATCH)\n        RewriteRule .* - [F]\n\n        <Location \"/\">\n          Require all granted\n        </Location>\n\n        Alias /static /var/www/html/horizon\n        <Location \"/static\">\n          SetHandler static\n        </Location>\n\n        ErrorLogFormat \"%{cu}t %M\"\n        ErrorLog /dev/stdout\n        TransferLog /dev/stdout\n\n        SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n        CustomLog /dev/stdout combined env=!forwarded\n     
   CustomLog /dev/stdout proxy env=forwarded\n\n        ErrorLog /dev/stdout\n        SSLEngine on\n        SSLCertificateFile      /etc/openstack-dashboard/certs/tls.crt\n        SSLCertificateKeyFile   /etc/openstack-dashboard/certs/tls.key\n        SSLProtocol             all -SSLv3 -TLSv1 -TLSv1.1\n        SSLCipherSuite          ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256\n        SSLHonorCipherOrder     on\n      </VirtualHost>\n    local_settings:\n      config:\n        use_ssl: \"True\"\n        csrf_cookie_secure: \"True\"\n        csrf_cookie_httponly: \"True\"\n        enforce_password_check: \"True\"\n        session_cookie_secure: \"True\"\n        session_cookie_httponly: \"True\"\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n    scheme:\n      default: https\n    port:\n      api:\n        default: 443\n  dashboard:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: horizon-tls-web\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: https\n      public: https\n    port:\n      web:\n        default: 443\n        public: 443\n  ingress:\n    port:\n      ingress:\n        default: 443\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/ironic/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    ironic_api:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      ironic:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/ironic/gateway.yaml",
    "content": "# Gateway API overrides for Ironic.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  baremetal:\n    host_fqdn_override:\n      public:\n        host: ironic.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: ironic-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.baremetal.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: ironic-api\n              port: 6385\n...\n"
  },
  {
    "path": "values_overrides/ironic/mariadb-operator.yaml",
    "content": "---\nconf:\n  ironic:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  ironic_api:\n    - ironic-db-conn\n  ironic_db_sync:\n    - ironic-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: ironic\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: ironic\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: ironic-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: ironic-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"ironic\"\n      table: \"*\"\n      username: ironic\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: ironic-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: ironic\n      passwordSecretKeyRef:\n        name: ironic-db-password\n        key: password\n      database: ironic\n      secretName: ironic-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n   
     retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/ironic/standalone.yaml",
    "content": "---\nconf:\n  ironic:\n    DEFAULT:\n      auth_strategy: noauth\n    conductor:\n      automated_clean: false\n    dhcp:\n      dhcp_provider: none\n\nnetwork:\n  pxe:\n    device: br-simulator\n\nbootstrap:\n  image:\n    enabled: false\n    openstack:\n      enabled: false\n  network:\n    enabled: false\n    openstack:\n      enabled: false\n  object_store:\n    enabled: false\n    openstack:\n      enabled: false\n\ndependencies:\n  static:\n    api:\n      jobs:\n        - ironic-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_messaging\n    bootstrap:\n      jobs: null\n      services: null\n    conductor:\n      jobs:\n        - ironic-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: oslo_messaging\n\nsecrets:\n  identity:\n    admin: ironic-keystone-admin\n    ironic: ironic-keystone-user\n\nmanifests:\n  job_ks_endpoints: false\n  job_ks_service: false\n  job_ks_user: false\n  job_manage_cleaning_network: false\n  secret_keystone: false\n...\n"
  },
  {
    "path": "values_overrides/keystone/2024.2-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    db_init: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    keystone_api: \"quay.io/airshipit/keystone:2024.2-ubuntu_jammy\"\n    keystone_bootstrap: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    keystone_credential_rotate: \"quay.io/airshipit/keystone:2024.2-ubuntu_jammy\"\n    keystone_credential_setup: \"quay.io/airshipit/keystone:2024.2-ubuntu_jammy\"\n    keystone_db_sync: \"quay.io/airshipit/keystone:2024.2-ubuntu_jammy\"\n    keystone_domain_manage: \"quay.io/airshipit/keystone:2024.2-ubuntu_jammy\"\n    keystone_fernet_rotate: \"quay.io/airshipit/keystone:2024.2-ubuntu_jammy\"\n    keystone_fernet_setup: \"quay.io/airshipit/keystone:2024.2-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/keystone/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    keystone_api: \"quay.io/airshipit/keystone:2025.1-ubuntu_jammy\"\n    keystone_bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    keystone_credential_rotate: \"quay.io/airshipit/keystone:2025.1-ubuntu_jammy\"\n    keystone_credential_setup: \"quay.io/airshipit/keystone:2025.1-ubuntu_jammy\"\n    keystone_db_sync: \"quay.io/airshipit/keystone:2025.1-ubuntu_jammy\"\n    keystone_domain_manage: \"quay.io/airshipit/keystone:2025.1-ubuntu_jammy\"\n    keystone_fernet_rotate: \"quay.io/airshipit/keystone:2025.1-ubuntu_jammy\"\n    keystone_fernet_setup: \"quay.io/airshipit/keystone:2025.1-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/keystone/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    keystone_api: \"quay.io/airshipit/keystone:2025.1-ubuntu_noble\"\n    keystone_bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    keystone_credential_rotate: \"quay.io/airshipit/keystone:2025.1-ubuntu_noble\"\n    keystone_credential_setup: \"quay.io/airshipit/keystone:2025.1-ubuntu_noble\"\n    keystone_db_sync: \"quay.io/airshipit/keystone:2025.1-ubuntu_noble\"\n    keystone_domain_manage: \"quay.io/airshipit/keystone:2025.1-ubuntu_noble\"\n    keystone_fernet_rotate: \"quay.io/airshipit/keystone:2025.1-ubuntu_noble\"\n    keystone_fernet_setup: \"quay.io/airshipit/keystone:2025.1-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n...\n"
  },
  {
    "path": "values_overrides/keystone/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    keystone_api: \"quay.io/airshipit/keystone:2025.2-ubuntu_noble\"\n    keystone_bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    keystone_credential_rotate: \"quay.io/airshipit/keystone:2025.2-ubuntu_noble\"\n    keystone_credential_setup: \"quay.io/airshipit/keystone:2025.2-ubuntu_noble\"\n    keystone_db_sync: \"quay.io/airshipit/keystone:2025.2-ubuntu_noble\"\n    keystone_domain_manage: \"quay.io/airshipit/keystone:2025.2-ubuntu_noble\"\n    keystone_fernet_rotate: \"quay.io/airshipit/keystone:2025.2-ubuntu_noble\"\n    keystone_fernet_setup: \"quay.io/airshipit/keystone:2025.2-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n...\n"
  },
  {
    "path": "values_overrides/keystone/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    keystone_api:\n      another.tld/foo: \"bar\"\n  job:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    keystone_credential_setup:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      keystone:\n        custom.tld/key: \"value\"\n    tls:\n      identity_api_public:\n        custom.tld/key: \"value\"\n    ldap:\n      tls:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/keystone/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    keystone:\n      container:\n        keystone_api:\n          appArmorProfile:\n            type: RuntimeDefault\n    credential_setup:\n      container:\n        keystone_credential_setup:\n          appArmorProfile:\n            type: RuntimeDefault\n    fernet_setup:\n      container:\n        keystone_fernet_setup:\n          appArmorProfile:\n            type: RuntimeDefault\n    domain_manage:\n      container:\n        keystone_domain_manage:\n          appArmorProfile:\n            type: RuntimeDefault\n        keystone_domain_manage_init:\n          appArmorProfile:\n            type: RuntimeDefault\n    test:\n      container:\n        keystone_test:\n          appArmorProfile:\n            type: RuntimeDefault\n        keystone_test_ks_user:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/keystone/gateway.yaml",
    "content": "# Gateway API overrides for Keystone.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  identity:\n    host_fqdn_override:\n      public:\n        host: keystone.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: keystone-route\n      namespace: openstack\n    spec:\n      parentRefs:\n      - name: gateway-default\n        namespace: envoy-gateway-system\n      hostnames:\n      - \"{{ .Values.endpoints.identity.host_fqdn_override.public.host }}\"\n      rules:\n      - matches:\n        - path:\n            type: PathPrefix\n            value: /\n        backendRefs:\n        - name: keystone-api\n          port: 5000\n...\n"
  },
  {
    "path": "values_overrides/keystone/internal-reverse-proxy.yaml",
    "content": "---\nendpoints:\n  identity:\n    host_fqdn_override:\n      public: example.com\n    scheme:\n      default: https\n      public: https\n      internal: https\n      service: http\n    port:\n      api:\n        default: 443\n        internal: 443\n        service: 5000\n...\n"
  },
  {
    "path": "values_overrides/keystone/ldap.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nconf:\n  keystone:\n    identity:\n      driver: sql\n      default_domain_id: default\n      domain_specific_drivers_enabled: True\n      domain_configurations_from_database: True\n      domain_config_dir: /etc/keystone/domains\n  ks_domains:\n    ldapdomain:\n      identity:\n        driver: ldap\n      ldap:\n        url: \"ldap://ldap.openstack.svc.cluster.local:389\"\n        user: \"cn=admin,dc=cluster,dc=local\"\n        password: password\n        suffix: \"dc=cluster,dc=local\"\n        user_attribute_ignore: \"enabled,email,tenants,default_project_id\"\n        query_scope: sub\n        user_enabled_emulation: True\n        user_enabled_emulation_dn: \"cn=overwatch,ou=Groups,dc=cluster,dc=local\"\n        user_tree_dn: \"ou=People,dc=cluster,dc=local\"\n        user_enabled_mask: 2\n        user_enabled_default: 512\n        user_name_attribute: cn\n        user_id_attribute: sn\n        user_mail_attribute: mail\n        user_pass_attribute: userPassword\n        group_tree_dn: \"ou=Groups,dc=cluster,dc=local\"\n        group_filter: \"\"\n        group_objectclass: posixGroup\n        group_id_attribute: cn\n        group_name_attribute: cn\n        group_desc_attribute: description\n        group_member_attribute: memberUID\n        use_pool: true\n        pool_size: 27\n        pool_retry_max: 3\n        pool_retry_delay: 0.1\n        pool_connection_timeout: 15\n        
pool_connection_lifetime: 600\n        use_auth_pool: true\n        auth_pool_size: 100\n        auth_pool_connection_lifetime: 60\n...\n"
  },
  {
    "path": "values_overrides/keystone/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    keystone_api: \"quay.io/airshipit/keystone:2025.1-ubuntu_noble_loci\"\n    keystone_bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    keystone_credential_rotate: \"quay.io/airshipit/keystone:2025.1-ubuntu_noble_loci\"\n    keystone_credential_setup: \"quay.io/airshipit/keystone:2025.1-ubuntu_noble_loci\"\n    keystone_db_sync: \"quay.io/airshipit/keystone:2025.1-ubuntu_noble_loci\"\n    keystone_domain_manage: \"quay.io/airshipit/keystone:2025.1-ubuntu_noble_loci\"\n    keystone_fernet_rotate: \"quay.io/airshipit/keystone:2025.1-ubuntu_noble_loci\"\n    keystone_fernet_setup: \"quay.io/airshipit/keystone:2025.1-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n...\n"
  },
  {
    "path": "values_overrides/keystone/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    keystone_api: \"quay.io/airshipit/keystone:2025.2-ubuntu_noble_loci\"\n    keystone_bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    keystone_credential_rotate: \"quay.io/airshipit/keystone:2025.2-ubuntu_noble_loci\"\n    keystone_credential_setup: \"quay.io/airshipit/keystone:2025.2-ubuntu_noble_loci\"\n    keystone_db_sync: \"quay.io/airshipit/keystone:2025.2-ubuntu_noble_loci\"\n    keystone_domain_manage: \"quay.io/airshipit/keystone:2025.2-ubuntu_noble_loci\"\n    keystone_fernet_rotate: \"quay.io/airshipit/keystone:2025.2-ubuntu_noble_loci\"\n    keystone_fernet_setup: \"quay.io/airshipit/keystone:2025.2-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n...\n"
  },
  {
    "path": "values_overrides/keystone/mariadb-operator.yaml",
    "content": "---\nconf:\n  keystone:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  keystone_api:\n    - keystone-db-conn\n  keystone_db_sync:\n    - keystone-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: keystone\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: keystone\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: keystone-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: keystone-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"keystone\"\n      table: \"*\"\n      username: keystone\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: keystone-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: keystone\n      passwordSecretKeyRef:\n        name: keystone-db-password\n        key: password\n      database: keystone\n      secretName: keystone-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      
healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/keystone/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\nnetwork_policy:\n  keystone:\n    ingress:\n      - from:\n        - podSelector:\n            matchLabels:\n              application: ceph\n        - podSelector:\n            matchLabels:\n              application: ingress\n        - podSelector:\n            matchLabels:\n              application: keystone\n        - podSelector:\n            matchLabels:\n              application: heat\n        - podSelector:\n            matchLabels:\n              application: glance\n        - podSelector:\n            matchLabels:\n              application: cinder\n        - podSelector:\n            matchLabels:\n              application: barbican\n        - podSelector:\n            matchLabels:\n              application: ceilometer\n        - podSelector:\n            matchLabels:\n              application: horizon\n        - podSelector:\n            matchLabels:\n              application: ironic\n        - podSelector:\n            matchLabels:\n              application: magnum\n        - podSelector:\n            matchLabels:\n              application: mistral\n        - podSelector:\n            matchLabels:\n              application: nova\n        - podSelector:\n            matchLabels:\n              application: neutron\n        - podSelector:\n            matchLabels:\n              application: placement\n        - podSelector:\n            matchLabels:\n              application: prometheus-openstack-exporter\n        ports:\n        - protocol: TCP\n          port: 5000\n    egress:\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/32\n        ports:\n          - protocol: TCP\n            port: %%%REPLACE_API_PORT%%%\n...\n"
  },
  {
    "path": "values_overrides/keystone/rabbitmq4.yaml",
    "content": "---\n# Upgrading from rabbitmq 3.x to 4.x requires:\n# 1: upgrading to the latest rabbitmq 3.x release and enabling all feature flags\n# 2: removing all rabbitmq 3.x openstack vhost ha policies\n# 3: setting rabbit_ha_queues to false in all openstack component configs\n# 4: wiping the rabbitmq database if rabbit_ha_queues and/or vhost ha policies were used with 3.x\nconf:\n  keystone:\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: false\n\n# Note: rabbit_ha_queues is true by default for all openstack components in openstack-helm\n\n# Steps to wipe rabbitmq database:\n# 1: rabbitmqctl stop_app\n# 2: rabbitmqctl force_reset\n# 3: rabbitmqctl start_app\n# 4: rerun all openstack component rabbit-init jobs to recreate rabbitmq vhosts and users\n\n# Note: rabbitmq classic v2 vs quorum queues\n# With rabbitmq 4.x classic queues have been replaced with classic v2 queues. Classic v2 queues\n# do not support high availability. For HA, quorum queues must be used. Quorum queues are HA by default.\n# Classic v2 queues are the default in Rabbitmq 4.x.\n#\n# To enable quorum queues with rabbitmq 4.x you can use:\n#\n# conf:\n#   keystone:\n#     oslo_messaging_rabbit:\n#       rabbit_ha_queues: false\n#       rabbit_quorum_queues: true\n#       rabbit_transient_quorum_queue: true\n#       use_queue_manager: true\n...\n"
  },
  {
    "path": "values_overrides/keystone/tls-custom.yaml",
    "content": "---\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      test:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n\nsecrets:\n  tls:\n    identity:\n      api:\n        # manually created\n        internal: keystone-tls-api\n\ntls:\n  identity: true\n...\n"
  },
  {
    "path": "values_overrides/keystone/tls.yaml",
    "content": "---\nnetwork:\n  api:\n    ingress:\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: null\n        nginx.ingress.kubernetes.io/backend-protocol: \"https\"\npod:\n  security_context:\n    keystone:\n      pod:\n        runAsUser: 0\n      container:\n        keystone_api:\n          readOnlyRootFilesystem: false\n          allowPrivilegeEscalation: false\nconf:\n  software:\n    apache2:\n      a2enmod:\n        - ssl\n  keystone:\n    oslo_messaging_rabbit:\n      ssl: true\n      ssl_ca_file: /etc/rabbitmq/certs/ca.crt\n      ssl_cert_file: /etc/rabbitmq/certs/tls.crt\n      ssl_key_file: /etc/rabbitmq/certs/tls.key\n  wsgi_keystone: |\n    {{- $portInt := tuple \"identity\" \"service\" \"api\" $ | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n\n    Listen 0.0.0.0:{{ $portInt }}\n\n    LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" combined\n    LogFormat \"%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" proxy\n\n    SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n    CustomLog /dev/stdout combined env=!forwarded\n    CustomLog /dev/stdout proxy env=forwarded\n    ErrorLogFormat \"%{cu}t %M\"\n    ErrorLog /dev/stdout\n\n    <VirtualHost *:{{ $portInt }}>\n      ServerName {{ printf \"%s.%s.svc.%s\" \"keystone-api\" .Release.Namespace .Values.endpoints.cluster_domain_suffix }}\n      WSGIDaemonProcess keystone-public processes=1 threads=1 user=keystone group=keystone display-name=%{GROUP}\n      WSGIProcessGroup keystone-public\n      WSGIScriptAlias / /var/www/cgi-bin/keystone/wsgi.py\n      WSGIApplicationGroup %{GLOBAL}\n      WSGIPassAuthorization On\n      ErrorLogFormat \"%{cu}t %M\"\n      ErrorLog /dev/stdout\n\n      SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n      CustomLog /dev/stdout combined env=!forwarded\n      CustomLog /dev/stdout proxy env=forwarded\n\n      SSLEngine on\n      
SSLCertificateFile      /etc/keystone/certs/tls.crt\n      SSLCertificateKeyFile   /etc/keystone/certs/tls.key\n      SSLProtocol             all -SSLv3 -TLSv1 -TLSv1.1\n      SSLCipherSuite          ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256\n      SSLHonorCipherOrder     on\n    </VirtualHost>\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      test:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: keystone-tls-api\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: https\n      public: https\n      service: https\n    port:\n      api:\n        default: 443\n  oslo_messaging:\n    port:\n      https:\n        default: 15680\nmanifests:\n  certificates: true\ntls:\n  identity: true\n  oslo_messaging: true\n  oslo_db: true\n...\n"
  },
  {
    "path": "values_overrides/kibana/2024.2-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    register_kibana_indexes: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n    flush_kibana_metadata: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/kibana/2025.1-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    register_kibana_indexes: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    flush_kibana_metadata: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/kibana/2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    register_kibana_indexes: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    flush_kibana_metadata: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/kibana/2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    register_kibana_indexes: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    flush_kibana_metadata: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/kibana/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    dashboard:\n      container:\n        kibana:\n          appArmorProfile:\n            type: RuntimeDefault\n        apache_proxy:\n          appArmorProfile:\n            type: RuntimeDefault\n    register_kibana_indexes:\n      container:\n        register_kibana_indexes:\n          appArmorProfile:\n            type: RuntimeDefault\n    flush_kibana_metadata:\n      container:\n        flush_kibana_metadata:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/kibana/gateway.yaml",
    "content": "# Gateway API overrides for Kibana.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  kibana:\n    host_fqdn_override:\n      public:\n        host: kibana.openstack-helm.org\n\nmanifests:\n  ingress: false\n  service_ingress: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: kibana-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.kibana.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: kibana-dash\n              port: 5601\n...\n"
  },
  {
    "path": "values_overrides/kibana/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    register_kibana_indexes: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    flush_kibana_metadata: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/kibana/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    register_kibana_indexes: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    flush_kibana_metadata: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/kibana/tls.yaml",
    "content": "---\nconf:\n  kibana:\n    elasticsearch:\n      ssl:\n        certificateAuthorities: [\"/etc/elasticsearch/certs/ca.crt\"]\n        verificationMode: certificate\nendpoints:\n  elasticsearch:\n    scheme:\n      default: \"https\"\n    port:\n      http:\n        default: 443\n  kibana:\n    host_fqdn_override:\n      default:\n        tls:\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssue\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/kubernetes-keystone-webhook/2024.2-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    scripted_test: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/kubernetes-keystone-webhook/2025.1-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    scripted_test: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/kubernetes-keystone-webhook/2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    scripted_test: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/kubernetes-keystone-webhook/2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    scripted_test: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/kubernetes-keystone-webhook/gateway.yaml",
    "content": "# Gateway API overrides for Kubernetes Keystone Webhook.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  kubernetes_keystone_webhook:\n    host_fqdn_override:\n      public:\n        host: k8sksauth.openstack-helm.org\n\nmanifests:\n  ingress_webhook: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: k8sksauth-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.kubernetes_keystone_webhook.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: k8sksauth-api\n              port: 8443\n...\n"
  },
  {
    "path": "values_overrides/kubernetes-keystone-webhook/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    scripted_test: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/kubernetes-keystone-webhook/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    scripted_test: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/kubernetes-node-problem-detector/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    node_problem_detector:\n      container:\n        node_problem_detector:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/libvirt/2024.2-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    libvirt: quay.io/airshipit/libvirt:2024.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/libvirt/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    libvirt: quay.io/airshipit/libvirt:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/libvirt/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    libvirt: quay.io/airshipit/libvirt:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/libvirt/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    libvirt: quay.io/airshipit/libvirt:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/libvirt/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    libvirt:\n      container:\n        libvirt:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/libvirt/cinder-external-ceph-backend.yaml",
    "content": "# Note: This yaml file serves as an example for overriding the manifest\n# to enable additional externally managed Ceph Cinder backend. When additional\n# externally managed Ceph Cinder backend is provisioned as shown in\n# cinder/values_overrides/external-ceph-backend.yaml of repo openstack-helm,\n# below override is needed to store the secret key of the cinder user in\n# libvirt.\n---\nconf:\n  ceph:\n    cinder:\n      external_ceph:\n        enabled: true\n        user: cinder2\n        secret_uuid: 3f0133e4-8384-4743-9473-fecacc095c74\n        user_secret_name: cinder-volume-external-rbd-keyring\n...\n"
  },
  {
    "path": "values_overrides/libvirt/inovex_exporter.yaml",
    "content": "---\nlibvirt:\n  extraContainers:\n    - name: libvirt-exporter\n      image: ghcr.io/inovex/prometheus-libvirt-exporter:2.3.0\n      imagePullPolicy: IfNotPresent\n      args:\n        - --libvirt.uri=/run/libvirt/libvirt-sock-ro\n      ports:\n        - name: metrics\n          protocol: TCP\n          containerPort: 9177\n      livenessProbe:\n        httpGet:\n          path: /metrics\n          port: 9177\n        initialDelaySeconds: 30\n        periodSeconds: 60\n        timeoutSeconds: 5\n      readinessProbe:\n        httpGet:\n          path: /metrics\n          port: 9177\n        initialDelaySeconds: 15\n        periodSeconds: 60\n        timeoutSeconds: 5\n      securityContext:\n        privileged: true\n      volumeMounts:\n        - name: run\n          mountPath: /run\n          mountPropagation: Bidirectional\n...\n"
  },
  {
    "path": "values_overrides/libvirt/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\n...\n"
  },
  {
    "path": "values_overrides/libvirt/node_overrides.yaml",
    "content": "---\n# We have two nodes labeled with node-nics-type=4nics and node-nics-type=2nics\n# on first node we pick up libvirt bind address from ens3 interface\n# on second node we pick up libvirt bind address from ens0 interface\noverrides:\n  libvirt_libvirt:\n    overrides_default: false\n    labels:\n      node-nics-type::4nics:\n        values:\n          conf:\n            dynamic_options:\n              libvirt:\n                listen_interface: ens3\n      node-nics-type::2nics:\n        values:\n          conf:\n            dynamic_options:\n              libvirt:\n                listen_interface: ens0\n...\n"
  },
  {
    "path": "values_overrides/libvirt/ovn.yaml",
    "content": "---\ndependencies:\n  dynamic:\n    targeted:\n      openvswitch:\n        libvirt:\n          pod: []\n...\n"
  },
  {
    "path": "values_overrides/libvirt/ssl.yaml",
    "content": "---\nconf:\n  libvirt:\n    listen_tcp: \"0\"\n    listen_tls: \"1\"\n    listen_addr: 0.0.0.0\n...\n"
  },
  {
    "path": "values_overrides/libvirt/vexxhost_exporter.yaml",
    "content": "---\nlibvirt:\n  extraContainers:\n    - name: libvirt-exporter\n      image: vexxhost/libvirtd-exporter:latest\n      imagePullPolicy: IfNotPresent\n      args:\n        - \"--libvirt.nova\"\n      ports:\n        - name: metrics\n          protocol: TCP\n          containerPort: 9474\n      livenessProbe:\n        httpGet:\n          path: /metrics\n          port: 9474\n        initialDelaySeconds: 30\n        periodSeconds: 60\n        timeoutSeconds: 5\n      readinessProbe:\n        httpGet:\n          path: /metrics\n          port: 9474\n        initialDelaySeconds: 15\n        periodSeconds: 60\n        timeoutSeconds: 5\n      securityContext:\n        privileged: true\n      volumeMounts:\n        - name: run\n          mountPath: /run\n          mountPropagation: Bidirectional\n...\n"
  },
  {
    "path": "values_overrides/local-storage/local-storage.yaml",
    "content": "---\nconf:\n  persistent_volumes:\n    - name: local-persistent-volume-0\n      reclaim_policy: Delete\n      storage_capacity: \"1Gi\"\n      access_modes: [\"ReadWriteOnce\"]\n      local_path: /srv/local-volume-0\n    - name: local-persistent-volume-1\n      reclaim_policy: Delete\n      storage_capacity: \"1Gi\"\n      access_modes: [\"ReadWriteOnce\"]\n      local_path: /srv/local-volume-1\n    - name: local-persistent-volume-2\n      reclaim_policy: Delete\n      storage_capacity: \"1Gi\"\n      access_modes: [\"ReadWriteOnce\"]\n      local_path: /srv/local-volume-2\n    - name: local-persistent-volume-3\n      reclaim_policy: Delete\n      storage_capacity: \"1Gi\"\n      access_modes: [\"ReadWriteOnce\"]\n      local_path: /srv/local-volume-3\n    - name: local-persistent-volume-4\n      reclaim_policy: Delete\n      storage_capacity: \"1Gi\"\n      access_modes: [\"ReadWriteOnce\"]\n      local_path: /srv/local-volume-4\n    - name: local-persistent-volume-5\n      reclaim_policy: Delete\n      storage_capacity: \"1Gi\"\n      access_modes: [\"ReadWriteOnce\"]\n      local_path: /srv/local-volume-5\nmanifests:\n  storage_class: true\n  persistent_volumes: true\n...\n"
  },
  {
    "path": "values_overrides/magnum/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    magnum_api:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      magnum:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/magnum/gateway.yaml",
    "content": "# Gateway API overrides for Magnum.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  container_infra:\n    host_fqdn_override:\n      public:\n        host: magnum.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: magnum-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.container_infra.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: magnum-api\n              port: 9511\n...\n"
  },
  {
    "path": "values_overrides/magnum/mariadb-operator.yaml",
    "content": "---\nconf:\n  magnum:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  magnum_api:\n    - magnum-db-conn\n  magnum_db_sync:\n    - magnum-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: magnum\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: magnum\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: magnum-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: magnum-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"magnum\"\n      table: \"*\"\n      username: magnum\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: magnum-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: magnum\n      passwordSecretKeyRef:\n        name: magnum-db-password\n        key: password\n      database: magnum\n      secretName: magnum-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n   
     retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/manila/2024.2-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    manila_db_sync: quay.io/airshipit/manila:2024.2-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    manila_api: quay.io/airshipit/manila:2024.2-ubuntu_jammy\n    manila_data: quay.io/airshipit/manila:2024.2-ubuntu_jammy\n    manila_scheduler: quay.io/airshipit/manila:2024.2-ubuntu_jammy\n    manila_share: quay.io/airshipit/manila:2024.2-ubuntu_jammy\nconf:\n  manila_api_uwsgi:\n    uwsgi:\n      # in 2025.2 the wsgi script was removed\n      wsgi-file: /var/lib/openstack/bin/manila-wsgi\n...\n"
  },
  {
    "path": "values_overrides/manila/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    manila_db_sync: quay.io/airshipit/manila:2025.1-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    manila_api: quay.io/airshipit/manila:2025.1-ubuntu_jammy\n    manila_data: quay.io/airshipit/manila:2025.1-ubuntu_jammy\n    manila_scheduler: quay.io/airshipit/manila:2025.1-ubuntu_jammy\n    manila_share: quay.io/airshipit/manila:2025.1-ubuntu_jammy\nconf:\n  manila_api_uwsgi:\n    uwsgi:\n      # in 2025.2 the wsgi script was removed\n      wsgi-file: /var/lib/openstack/bin/manila-wsgi\n...\n"
  },
  {
    "path": "values_overrides/manila/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    manila_db_sync: quay.io/airshipit/manila:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    manila_api: quay.io/airshipit/manila:2025.1-ubuntu_noble\n    manila_data: quay.io/airshipit/manila:2025.1-ubuntu_noble\n    manila_scheduler: quay.io/airshipit/manila:2025.1-ubuntu_noble\n    manila_share: quay.io/airshipit/manila:2025.1-ubuntu_noble\nconf:\n  manila_api_uwsgi:\n    uwsgi:\n      # in 2025.2 the wsgi script was removed\n      wsgi-file: /var/lib/openstack/bin/manila-wsgi\n...\n"
  },
  {
    "path": "values_overrides/manila/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    manila_db_sync: quay.io/airshipit/manila:2025.2-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    manila_api: quay.io/airshipit/manila:2025.2-ubuntu_noble\n    manila_data: quay.io/airshipit/manila:2025.2-ubuntu_noble\n    manila_scheduler: quay.io/airshipit/manila:2025.2-ubuntu_noble\n    manila_share: quay.io/airshipit/manila:2025.2-ubuntu_noble\nconf:\n  manila_api_uwsgi:\n    uwsgi:\n      # in 2025.2 the wsgi script was removed\n      wsgi-file: /var/lib/openstack/bin/manila-wsgi\n...\n"
  },
  {
    "path": "values_overrides/manila/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    manila_api:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      manila:\n        custom.tld/key: \"value\"\n    tls:\n      share_api_public:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/manila/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    manila:\n      container:\n        manila_api:\n          appArmorProfile:\n            type: RuntimeDefault\n    test:\n      container:\n        manila_test:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/manila/gateway.yaml",
    "content": "# Gateway API overrides for Manila.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  sharev2:\n    host_fqdn_override:\n      public:\n        host: manila.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: manila-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.sharev2.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: manila-api\n              port: 8786\n...\n"
  },
  {
    "path": "values_overrides/manila/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    manila_db_sync: quay.io/airshipit/manila:2025.1-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    manila_api: quay.io/airshipit/manila:2025.1-ubuntu_noble_loci\n    manila_data: quay.io/airshipit/manila:2025.1-ubuntu_noble_loci\n    manila_scheduler: quay.io/airshipit/manila:2025.1-ubuntu_noble_loci\n    manila_share: quay.io/airshipit/manila:2025.1-ubuntu_noble_loci\nconf:\n  manila_api_uwsgi:\n    uwsgi:\n      # in 2025.2 the wsgi script was removed\n      wsgi-file: /var/lib/openstack/bin/manila-wsgi\n...\n"
  },
  {
    "path": "values_overrides/manila/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    manila_db_sync: quay.io/airshipit/manila:2025.2-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    manila_api: quay.io/airshipit/manila:2025.2-ubuntu_noble_loci\n    manila_data: quay.io/airshipit/manila:2025.2-ubuntu_noble_loci\n    manila_scheduler: quay.io/airshipit/manila:2025.2-ubuntu_noble_loci\n    manila_share: quay.io/airshipit/manila:2025.2-ubuntu_noble_loci\nconf:\n  manila_api_uwsgi:\n    uwsgi:\n      # in 2025.2 the wsgi script was removed\n      wsgi-file: /var/lib/openstack/bin/manila-wsgi\n...\n"
  },
  {
    "path": "values_overrides/manila/mariadb-operator.yaml",
    "content": "---\nconf:\n  manila:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  manila_api:\n    - manila-db-conn\n  manila_db_sync:\n    - manila-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: manila\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: manila\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: manila-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: manila-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"manila\"\n      table: \"*\"\n      username: manila\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: manila-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: manila\n      passwordSecretKeyRef:\n        name: manila-db-password\n        key: password\n      database: manila\n      secretName: manila-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n   
     retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/manila/rabbitmq4.yaml",
    "content": "---\n# Upgrading from rabbitmq 3.x to 4.x requires:\n# 1: upgrading to the latest rabbitmq 3.x release and enabling all feature flags\n# 2: removing all rabbitmq 3.x openstack vhost ha policies\n# 3: setting rabbit_ha_queues to false in all openstack component configs\n# 4: wiping the rabbitmq database if rabbit_ha_queues and/or vhost ha policies were used with 3.x\nconf:\n  manila:\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: false\n\n# Note: rabbit_ha_queues is true by default for all openstack components in openstack-helm\n\n# Steps to wipe rabbitmq database:\n# 1: rabbitmqctl stop_app\n# 2: rabbitmqctl force_reset\n# 3: rabbitmqctl start_app\n# 4: rerun all openstack component rabbit-init jobs to recreate rabbitmq vhosts and users\n\n# Note: rabbitmq classic v2 vs quorum queues\n# With rabbitmq 4.x classic queues have been replaced with classic v2 queues. Classic v2 queues\n# do not support high availability. For HA, quorum queues must be used. Quorum queues are HA by default.\n# Classic v2 queues are the default in Rabbitmq 4.x.\n#\n# To enable quorum queues with rabbitmq 4.x you can use:\n#\n# conf:\n#   manila:\n#     oslo_messaging_rabbit:\n#       rabbit_ha_queues: false\n#       rabbit_quorum_queues: true\n#       rabbit_transient_quorum_queue: true\n#       use_queue_manager: true\n...\n"
  },
  {
    "path": "values_overrides/manila/tls-offloading.yaml",
    "content": "---\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      manila:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n\ntls:\n  identity: true\n...\n"
  },
  {
    "path": "values_overrides/manila/tls.yaml",
    "content": "---\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/mariadb/2024.2-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n    ks_user: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/mariadb/2025.1-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/mariadb/2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/mariadb/2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/mariadb/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    server:\n      container:\n        mariadb:\n          appArmorProfile:\n            type: RuntimeDefault\n        exporter:\n          appArmorProfile:\n            type: RuntimeDefault\n        perms:\n          appArmorProfile:\n            type: RuntimeDefault\n    mariadb_backup:\n      container:\n        mariadb_backup:\n          appArmorProfile:\n            type: RuntimeDefault\n        verify_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n        backup_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n    tests:\n      container:\n        test:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/mariadb/backups.yaml",
    "content": "---\nconf:\n  backup:\n    enabled: true\n    remote_backup:\n      enabled: true\nvolume:\n  backup:\n    enabled: true\nmanifests:\n  pvc_backup: true\n  job_ks_user: false\n  cron_job_mariadb_backup: true\n  secret_backup_restore: true\nendpoints:\n  identity:\n    auth:\n      mariadb:\n        auth_url: null\n        role: admin\n        region_name: RegionOne\n        username: mariadb-backup-user\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      mariadb_failover:\n        auth_url: null\n        role: admin\n        region_name: RegionOne\n        username: mariadb-backup-user-failover\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n...\n"
  },
  {
    "path": "values_overrides/mariadb/local-storage.yaml",
    "content": "---\npod:\n  replicas:\n    server: 1\nvolume:\n  size: 1Gi\n  class_name: local-storage\nmonitoring:\n  prometheus:\n    enabled: false\n...\n"
  },
  {
    "path": "values_overrides/mariadb/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/mariadb/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/mariadb/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\nnetwork_policy:\n  mariadb:\n    egress:\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/32\n        ports:\n          - protocol: TCP\n            port: %%%REPLACE_API_PORT%%%\n    ingress:\n      - from:\n        - podSelector:\n            matchLabels:\n              application: keystone\n        - podSelector:\n            matchLabels:\n              application: heat\n        - podSelector:\n            matchLabels:\n              application: glance\n        - podSelector:\n            matchLabels:\n              application: cinder\n        - podSelector:\n            matchLabels:\n              application: aodh\n        - podSelector:\n            matchLabels:\n              application: barbican\n        - podSelector:\n            matchLabels:\n              application: ceilometer\n        - podSelector:\n            matchLabels:\n              application: designate\n        - podSelector:\n            matchLabels:\n              application: horizon\n        - podSelector:\n            matchLabels:\n              application: ironic\n        - podSelector:\n            matchLabels:\n              application: magnum\n        - podSelector:\n            matchLabels:\n              application: mistral\n        - podSelector:\n            matchLabels:\n              application: nova\n        - podSelector:\n            matchLabels:\n              application: neutron\n        - podSelector:\n            matchLabels:\n              application: rally\n        - podSelector:\n            matchLabels:\n              application: senlin\n        - podSelector:\n            matchLabels:\n              application: placement\n        - podSelector:\n            matchLabels:\n              application: prometheus-mysql-exporter\n        - podSelector:\n            matchLabels:\n              application: mariadb\n        - podSelector:\n            matchLabels:\n              
application: mariadb-backup\n        ports:\n        - protocol: TCP\n          port: 3306\n        - protocol: TCP\n          port: 4567\n        - protocol: TCP\n          port: 80\n        - protocol: TCP\n          port: 8080\n...\n"
  },
  {
    "path": "values_overrides/mariadb/remote_backups.yaml",
    "content": "---\nconf:\n  backup:\n    enabled: true\n    remote_backup:\n      enabled: true\nvolume:\n  backup:\n    enabled: true\nendpoints:\n  identity:\n    auth:\n      mariadb:\n        # Auth URL of null indicates local authentication\n        # HTK will form the URL unless specified here\n        auth_url: https://rgw-1.test.local\n        region_name: RegionOne\n        username: mariadb-backup-user\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      mariadb_failover:\n        # # Auth URL of null indicates local authentication\n        # # HTK will form the URL unless specified here\n        auth_url: https://rgw-2.test.local\n        region_name: RegionOne\n        username: mariadb-backup-user\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\nmanifests:\n  pvc_backup: true\n  job_ks_user: true\n  cron_job_mariadb_backup: true\n  secret_backup_restore: true\n...\n"
  },
  {
    "path": "values_overrides/mariadb/staggered-backups.yaml",
    "content": "---\nconf:\n  backup:\n    enabled: true\n    remote_backup:\n      enabled: false\npod:\n  labels:\n    backup:\n      staggered_backups: enabled\n  affinity:\n    mariadb_backup:\n      podAntiAffinity:\n        requiredDuringSchedulingIgnoredDuringExecution:\n          - labelSelector:\n              matchExpressions:\n                - key: status.phase\n                  operator: NotIn\n                  values:\n                    - Running\n                - key: staggered-backups\n                  operator: In\n                  values:\n                    - enabled\n            namespaces:\n              - openstack\n              - osh-infra\n              - ucp\n            topologyKey: kubernetes.io/os\nvolume:\n  backup:\n    enabled: true\nmanifests:\n  pvc_backup: true\n  job_ks_user: false\n  cron_job_mariadb_backup: true\n  secret_backup_restore: true\n...\n"
  },
  {
    "path": "values_overrides/mariadb/tls.yaml",
    "content": "---\npod:\n  security_context:\n    server:\n      container:\n        perms:\n          readOnlyRootFilesystem: false\n        mariadb:\n          runAsUser: 0\n          allowPrivilegeEscalation: true\n          readOnlyRootFilesystem: false\nendpoints:\n  oslo_db:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: mariadb-tls-direct\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/mariadb/wait-for-cluster.yaml",
    "content": "---\nmanifests:\n  job_cluster_wait: true\n...\n"
  },
  {
    "path": "values_overrides/mariadb-backup/2024.2-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n    ks_user: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/mariadb-backup/2025.1-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/mariadb-backup/2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/mariadb-backup/2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/mariadb-backup/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    mariadb_backup:\n      container:\n        mariadb_backup:\n          appArmorProfile:\n            type: RuntimeDefault\n        verify_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n        backup_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/mariadb-backup/backups.yaml",
    "content": "---\nconf:\n  backup:\n    enabled: true\n    remote_backup:\n      enabled: false\nvolume:\n  backup:\n    enabled: true\nmanifests:\n  pvc_backup: true\n  job_ks_user: false\n  cron_job_mariadb_backup: true\n  secret_backup_restore: true\n...\n"
  },
  {
    "path": "values_overrides/mariadb-backup/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/mariadb-backup/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/mariadb-backup/staggered-backups.yaml",
    "content": "---\nconf:\n  backup:\n    enabled: true\n    remote_backup:\n      enabled: false\npod:\n  labels:\n    backup:\n      staggered_backups: enabled\n  affinity:\n    mariadb_backup:\n      podAntiAffinity:\n        requiredDuringSchedulingIgnoredDuringExecution:\n          - labelSelector:\n              matchExpressions:\n                - key: status.phase\n                  operator: NotIn\n                  values:\n                    - Running\n                - key: staggered-backups\n                  operator: In\n                  values:\n                    - enabled\n            namespaces:\n              - openstack\n              - osh-infra\n              - ucp\n            topologyKey: kubernetes.io/os\nvolume:\n  backup:\n    enabled: true\nmanifests:\n  pvc_backup: true\n  job_ks_user: false\n  cron_job_mariadb_backup: true\n  secret_backup_restore: true\n...\n"
  },
  {
    "path": "values_overrides/mariadb-backup/tls.yaml",
    "content": "---\nendpoints:\n  oslo_db:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: mariadb-tls-direct\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/mariadb-cluster/2024.2-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n    ks_user: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/mariadb-cluster/2025.1-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/mariadb-cluster/2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/mariadb-cluster/2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/mariadb-cluster/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    server:\n      container:\n        mariadb:\n          appArmorProfile:\n            type: RuntimeDefault\n        agent:\n          appArmorProfile:\n            type: RuntimeDefault\n        perms:\n          appArmorProfile:\n            type: RuntimeDefault\n    tests:\n      container:\n        test:\n          appArmorProfile:\n            type: RuntimeDefault\n    mariadb_cluster_refresh_statefulset:\n      container:\n        main:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/mariadb-cluster/downscaled.yaml",
    "content": "---\nconf:\n  galera:\n    enabled: false\npod:\n  replicas:\n    server: 1\n...\n"
  },
  {
    "path": "values_overrides/mariadb-cluster/local-storage.yaml",
    "content": "---\npod:\n  replicas:\n    server: 1\nvolume:\n  size: 1Gi\n  class_name: local-storage\nmonitoring:\n  prometheus:\n    enabled: false\n...\n"
  },
  {
    "path": "values_overrides/mariadb-cluster/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/mariadb-cluster/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/mariadb-cluster/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\nnetwork_policy:\n  mariadb:\n    egress:\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/32\n        ports:\n          - protocol: TCP\n            port: %%%REPLACE_API_PORT%%%\n    ingress:\n      - from:\n        - podSelector:\n            matchLabels:\n              application: keystone\n        - podSelector:\n            matchLabels:\n              application: heat\n        - podSelector:\n            matchLabels:\n              application: glance\n        - podSelector:\n            matchLabels:\n              application: cinder\n        - podSelector:\n            matchLabels:\n              application: aodh\n        - podSelector:\n            matchLabels:\n              application: barbican\n        - podSelector:\n            matchLabels:\n              application: ceilometer\n        - podSelector:\n            matchLabels:\n              application: designate\n        - podSelector:\n            matchLabels:\n              application: horizon\n        - podSelector:\n            matchLabels:\n              application: ironic\n        - podSelector:\n            matchLabels:\n              application: magnum\n        - podSelector:\n            matchLabels:\n              application: mistral\n        - podSelector:\n            matchLabels:\n              application: nova\n        - podSelector:\n            matchLabels:\n              application: neutron\n        - podSelector:\n            matchLabels:\n              application: rally\n        - podSelector:\n            matchLabels:\n              application: senlin\n        - podSelector:\n            matchLabels:\n              application: placement\n        - podSelector:\n            matchLabels:\n              application: prometheus-mysql-exporter\n        - podSelector:\n            matchLabels:\n              application: mariadb\n        - podSelector:\n            matchLabels:\n              
application: mariadb-backup\n        ports:\n        - protocol: TCP\n          port: 3306\n        - protocol: TCP\n          port: 4567\n        - protocol: TCP\n          port: 80\n        - protocol: TCP\n          port: 8080\n...\n"
  },
  {
    "path": "values_overrides/mariadb-cluster/prometheus.yaml",
    "content": "---\nmonitoring:\n  prometheus:\n    enabled: true\nmanifests:\n  monitoring:\n    prometheus:\n      configmap_bin: true\n      deployment_exporter: true\n      job_user_create: true\n      secret_etc: true\n      service_exporter: true\n      network_policy_exporter: true\n...\n"
  },
  {
    "path": "values_overrides/mariadb-cluster/tls.yaml",
    "content": "---\nendpoints:\n  oslo_db:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: mariadb-tls-direct\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/mariadb-cluster/upscaled.yaml",
    "content": "---\nconf:\n  galera:\n    enabled: true\npod:\n  replicas:\n    server: 3\n...\n"
  },
  {
    "path": "values_overrides/masakari/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    masakari_api:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      masakari:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/masakari/mariadb-operator.yaml",
    "content": "---\nconf:\n  masakari:\n    database:\n      connection: null\n    taskflow:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  masakari_api:\n    - masakari-db-conn\n  masakari_db_sync:\n    - masakari-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: masakari\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: masakari\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: masakari-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: masakari-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"masakari\"\n      table: \"*\"\n      username: masakari\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: masakari-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: masakari\n      passwordSecretKeyRef:\n        name: masakari-db-password\n        key: password\n      database: masakari\n      secretName: masakari-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ 
.Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/memcached/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    server:\n      container:\n        memcached:\n          appArmorProfile:\n            type: RuntimeDefault\n        memcached_exporter:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/memcached/exporter.yaml",
    "content": "---\nmemcached:\n  extraServicePorts:\n    - name: metrics\n      port: 9150\n      targetPort: 9150\n      protocol: TCP\n  extraContainers:\n    - name: memcached-exporter\n      image: docker.io/prom/memcached-exporter:v0.15.5\n      imagePullPolicy: IfNotPresent\n      command:\n        - /bin/memcached_exporter\n      args:\n        - --memcached.address=127.0.0.1:11211\n      ports:\n        - name: metrics\n          containerPort: 9150\n      livenessProbe:\n        httpGet:\n          path: /metrics\n          port: 9150\n        initialDelaySeconds: 15\n        periodSeconds: 60\n        timeoutSeconds: 10\n      readinessProbe:\n        httpGet:\n          path: /metrics\n          port: 9150\n        initialDelaySeconds: 5\n        periodSeconds: 60\n        timeoutSeconds: 10\n      securityContext:\n        allowPrivilegeEscalation: false\n        readOnlyRootFilesystem: true\n        capabilities:\n          drop:\n            - ALL\n      resources:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"500m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n...\n"
  },
  {
    "path": "values_overrides/memcached/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\nnetwork_policy:\n  memcached:\n    ingress:\n      - from:\n        - podSelector:\n            matchLabels:\n              application: ingress\n        - podSelector:\n            matchLabels:\n              application: keystone\n        - podSelector:\n            matchLabels:\n              application: heat\n        - podSelector:\n            matchLabels:\n              application: glance\n        - podSelector:\n            matchLabels:\n              application: cinder\n        - podSelector:\n            matchLabels:\n              application: barbican\n        - podSelector:\n            matchLabels:\n              application: ceilometer\n        - podSelector:\n            matchLabels:\n              application: horizon\n        - podSelector:\n            matchLabels:\n              application: ironic\n        - podSelector:\n            matchLabels:\n              application: magnum\n        - podSelector:\n            matchLabels:\n              application: mistral\n        - podSelector:\n            matchLabels:\n              application: nova\n        - podSelector:\n            matchLabels:\n              application: neutron\n        - podSelector:\n            matchLabels:\n              application: senlin\n        - podSelector:\n            matchLabels:\n              application: placement\n        - podSelector:\n            matchLabels:\n              application: prometheus_memcached_exporter\n        - podSelector:\n            matchLabels:\n              application: aodh\n        - podSelector:\n            matchLabels:\n              application: rally\n        - podSelector:\n            matchLabels:\n              application: memcached\n        ports:\n        - port: 11211\n          protocol: TCP\n        - port: 9150\n          protocol: TCP\n    egress:\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/32\n        ports:\n          - 
protocol: TCP\n            port: %%%REPLACE_API_PORT%%%\n...\n"
  },
  {
    "path": "values_overrides/mistral/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    mistral_db_sync: quay.io/airshipit/mistral:2025.1-ubuntu_noble\n    mistral_api: quay.io/airshipit/mistral:2025.1-ubuntu_noble\n    mistral_engine: quay.io/airshipit/mistral:2025.1-ubuntu_noble\n    mistral_event_engine: quay.io/airshipit/mistral:2025.1-ubuntu_noble\n    mistral_executor: quay.io/airshipit/mistral:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/mistral/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    mistral_db_sync: quay.io/airshipit/mistral:2025.2-ubuntu_noble\n    mistral_api: quay.io/airshipit/mistral:2025.2-ubuntu_noble\n    mistral_engine: quay.io/airshipit/mistral:2025.2-ubuntu_noble\n    mistral_event_engine: quay.io/airshipit/mistral:2025.2-ubuntu_noble\n    mistral_executor: quay.io/airshipit/mistral:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/mistral/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    mistral_api:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      mistral:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/mistral/gateway.yaml",
    "content": "# Gateway API overrides for Mistral.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  workflowv2:\n    host_fqdn_override:\n      public:\n        host: mistral.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: mistral-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.workflowv2.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: mistral-api\n              port: 8989\n...\n"
  },
  {
    "path": "values_overrides/mistral/mariadb-operator.yaml",
    "content": "---\nconf:\n  mistral:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  mistral_api:\n    - mistral-db-conn\n  mistral_db_sync:\n    - mistral-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: mistral\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: mistral\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: mistral-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: mistral-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"mistral\"\n      table: \"*\"\n      username: mistral\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: mistral-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: mistral\n      passwordSecretKeyRef:\n        name: mistral-db-password\n        key: password\n      database: mistral\n      secretName: mistral-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        
interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/nagios/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    monitoring:\n      container:\n        nagios:\n          appArmorProfile:\n            type: RuntimeDefault\n        define_nagios_hosts:\n          appArmorProfile:\n            type: RuntimeDefault\n        apache_proxy:\n          appArmorProfile:\n            type: RuntimeDefault\n        helm_tests:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/nagios/elasticsearch-objects.yaml",
    "content": "---\nconf:\n  nagios:\n    objects:\n      fluent:\n        template: |\n          define service {\n            check_command check_prom_alert!fluentd_not_running!CRITICAL- fluentd is not running on {instance}!OK- Fluentd is working on all nodes\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Fluentd_status\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!prom_exporter_fluentd_unavailable!CRITICAL- Fluentd exporter is not collecting metrics for alerting!OK- Fluentd exporter metrics are available.\n            hostgroup_name prometheus-hosts\n            service_description Prometheus-exporter_Fluentd\n            use generic-service\n          }\n      elasticsearch:\n        template: |\n          define command {\n            command_line $USER1$/query_elasticsearch.py $USER9$ '$ARG1$' '$ARG2$' '$ARG3$' '$ARG4$' '$ARG5$' --simple_query '$ARG6$' --simple_query_fields '$ARG7$' --match '$ARG8$' --range '$ARG9$'\n            command_name check_es_query\n          }\n\n          define command {\n            command_line $USER1$/query_elasticsearch.py $USER9$ '$ARG1$' '$ARG2$' '$ARG3$' '$ARG4$' '$ARG5$' --simple_query '$ARG6$' --simple_query_fields '$ARG7$' --query_file '/opt/nagios/etc/objects/query_es_clauses.json' --query_clause '$ARG8$' --match '$ARG9$' --range '$ARG10$'\n            command_name check_es_query_w_file\n          }\n\n          define service {\n            check_command check_prom_alert!prom_exporter_elasticsearch_unavailable!CRITICAL- Elasticsearch exporter is not collecting metrics for alerting!OK- Elasticsearch exporter metrics are available.\n            hostgroup_name prometheus-hosts\n            service_description Prometheus-exporter_Elasticsearch\n            use generic-service\n          }\n\n          define service {\n            check_command 
check_prom_alert!es_high_process_open_files_count!CRITICAL- Elasticsearch {host} has high process open file count!OK- Elasticsearch process open file count is normal.\n            hostgroup_name prometheus-hosts\n            service_description ES_high-process-open-file-count\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!es_high_process_cpu_percent!CRITICAL- Elasticsearch {instance} has high process CPU percent!OK- Elasticsearch process cpu usage is normal.\n            hostgroup_name prometheus-hosts\n            service_description ES_high-process-cpu-percent\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!es_fs_usage_high!CRITICAL- Elasticsearch {instance} has high filesystem usage!OK- Elasticsearch filesystem usage is normal.\n            hostgroup_name prometheus-hosts\n            service_description ES_high-filesystem-usage\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!es_unassigned_shards!CRITICAL- Elasticsearch has unassigned shards!OK- Elasticsearch has no unassigned shards.\n            hostgroup_name prometheus-hosts\n            service_description ES_unassigned-shards\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!es_cluster_health_timed_out!CRITICAL- Elasticsearch Cluster health status call timed out!OK- Elasticsearch cluster health is retrievable.\n            hostgroup_name prometheus-hosts\n            service_description ES_cluster-health-timedout\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!es_cluster_health_status_alert!CRITICAL- Elasticsearch cluster health status is not green. 
One or more shards or replicas are unallocated!OK- Elasticsearch cluster health is green.\n            hostgroup_name prometheus-hosts\n            service_description ES_cluster-health-status\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!es_cluster_health_too_few_nodes_running!CRITICAL- Elasticsearch Cluster has < 3 nodes running!OK- Elasticsearch cluster has 3 or more nodes running.\n            hostgroup_name prometheus-hosts\n            service_description ES_cluster-running-node-count\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!es_cluster_health_too_few_data_nodes_running!CRITICAL- Elasticsearch Cluster has < 3 data nodes running!OK- Elasticsearch cluster has 3 or more data nodes running.\n            hostgroup_name prometheus-hosts\n            service_description ES_cluster-running-data-node-count\n            use generic-service\n          }\n...\n"
  },
  {
    "path": "values_overrides/nagios/gateway.yaml",
    "content": "# Gateway API overrides for Nagios.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  nagios:\n    host_fqdn_override:\n      public:\n        host: nagios.openstack-helm.org\n\nmanifests:\n  ingress: false\n  service_ingress: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: nagios-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.nagios.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: nagios-metrics\n              port: 8000\n...\n"
  },
  {
    "path": "values_overrides/nagios/openstack-objects.yaml",
    "content": "---\nconf:\n  nagios:\n    objects:\n      mariadb:\n        template: |\n          define service {\n            check_command check_prom_alert!prom_exporter_mariadb_unavailable!CRITICAL- MariaDB exporter is not collecting metrics for alerting!OK- MariaDB exporter metrics are available.\n            hostgroup_name prometheus-hosts\n            service_description Prometheus-exporter_MariaDB\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!mariadb_table_lock_wait_high!CRITICAL- Mariadb has high number of table lock waits!OK- No issues found with table lock waits.\n            hostgroup_name prometheus-hosts\n            service_description Mariadb_table-lock-waits-high\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!mariadb_node_not_ready!CRITICAL- Mariadb {instance} is not ready!OK- All galera cluster nodes are ready.\n            hostgroup_name prometheus-hosts\n            service_description Mariadb_node-ready\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!mariadb_galera_node_out_of_sync!CRITICAL- Mariadb {instance} is out of sync!OK- All galera cluster nodes are in sync\n            hostgroup_name prometheus-hosts\n            service_description Mariadb_node-synchronized\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!mariadb_innodb_replication_fallen_behind!CRITICAL- Innodb replication has fallen behind and not recovering!OK- innodb replication lag is nominal.\n            hostgroup_name prometheus-hosts\n            service_description Mariadb_innodb-replication-lag\n            use generic-service\n          }\n      rabbitmq:\n        template: |\n          define service {\n            check_command 
check_prom_alert!rabbitmq_network_pratitions_detected!CRITICAL- Rabbitmq instance {instance} has network partitions!OK- no network partitions detected in rabbitmq\n            hostgroup_name prometheus-hosts\n            service_description Rabbitmq_network-partitions-exist\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!rabbitmq_down!CRITICAL- Rabbitmq instance {instance} is down!OK- rabbitmq is available\n            hostgroup_name prometheus-hosts\n            service_description Rabbitmq_up\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!rabbitmq_file_descriptor_usage_high!CRITICAL- Rabbitmq instance {instance} has file descriptor usage more than 80 percent!OK- rabbitmq file descriptor usage is normal\n            hostgroup_name prometheus-hosts\n            service_description Rabbitmq_file-descriptor-usage\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!rabbitmq_node_disk_free_alarm!CRITICAL- Rabbitmq instance {instance} has a disk usage alarm!OK- rabbitmq node disk has no alarms\n            hostgroup_name prometheus-hosts\n            service_description Rabbitmq_node-disk-alarm\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!rabbitmq_node_memory_alarm!CRITICAL- Rabbitmq instance {instance} has a memory alarm!OK- rabbitmq node memory has no alarms\n            hostgroup_name prometheus-hosts\n            service_description Rabbitmq_node-memory-alarm\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!rabbitmq_less_than_3_nodes!CRITICAL- Rabbitmq has less than 3 nodes to serve!OK- rabbitmq has at least 3 nodes serving\n            hostgroup_name prometheus-hosts\n            service_description 
Rabbitmq_high-availability\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!rabbitmq_queue_messages_returned_high!CRITICAL- Rabbitmq has high percent of messages being returned!OK- rabbitmq messages are consumed and low or no returns exist.\n            hostgroup_name prometheus-hosts\n            service_description Rabbitmq_message-return-percent\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!rabbitmq_consumers_low_utilization!CRITICAL- Rabbitmq consumer message consumption rate is slow!OK- rabbitmq message consumption speed is normal\n            hostgroup_name prometheus-hosts\n            service_description Rabbitmq_consumer-utilization\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!rabbitmq_high_message_load!CRITICAL- Rabbitmq unacknowledged message count is high!OK- rabbitmq unacknowledged message count is low\n            hostgroup_name prometheus-hosts\n            service_description Rabbitmq_rabbitmq-queue-health\n            use generic-service\n          }\n      openstack:\n        template: |\n          define service {\n            check_command check_prom_alert!os_glance_api_availability!CRITICAL- Glance API at {url} is not available!OK- Glance API is available\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description API_glance\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_nova_api_availability!CRITICAL- Nova API at {url} is not available!OK- Nova API is available\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description API_nova\n            use notifying_service\n          }\n\n          define service {\n            check_command 
check_prom_alert!os_keystone_api_availability!CRITICAL- Keystone API at {url} is not available!OK- Keystone API is available\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description API_keystone\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_neutron_api_availability!CRITICAL- Neutron API at {url} is not available!OK- Neutron API is available\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description API_neutron\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_neutron_metadata_agent_availability!CRITICAL- Some Neutron metadata agents are not available!OK- All the neutron metadata agents are up\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Service_neutron-metadata-agent\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_neutron_openvswitch_agent_availability!CRITICAL- Some Neutron openvswitch agents are not available!OK- All the neutron openvswitch agents are up\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Service_neutron-openvswitch-agent\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_neutron_dhcp_agent_availability!CRITICAL- Some Neutron dhcp agents are not available!OK- All the neutron dhcp agents are up\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Service_neutron-dhcp-agent\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_neutron_l3_agent_availability!CRITICAL- Some Neutron l3 agents are not 
available!OK- All the neutron l3 agents are up\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Service_neutron-l3-agent\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_swift_api_availability!CRITICAL- Swift API at {url} is not available!OK- Swift API is available\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description API_swift\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_cinder_api_availability!CRITICAL- Cinder API at {url} is not available!OK- Cinder API is available\n            hostgroup_name prometheus-hosts\n            service_description API_cinder\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_heat_api_availability!CRITICAL- Heat API at {url} is not available!OK- Heat API is available\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description API_heat\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_cinder_api_availability!CRITICAL- Cinder API at {url} is not available!OK- Cinder API is available\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description API_cinder\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_cinder_scheduler_availability!CRITICAL- Cinder scheduler is not available!OK- Cinder scheduler is available\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Service_cinder-scheduler\n            use notifying_service\n          }\n\n          define service {\n            check_command 
check_prom_alert!os_nova_compute_down!CRITICAL- nova-compute services are down on certain hosts!OK- nova-compute services are up on all hosts\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Service_nova-compute\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_nova_conductor_down!CRITICAL- nova-conductor services are down on certain hosts!OK- nova-conductor services are up on all hosts\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Service_nova-conductor\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_nova_consoleauth_down!CRITICAL- nova-consoleauth services are down on certain hosts!OK- nova-consoleauth services are up on all hosts\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Service_nova-consoleauth\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!openstack_nova_scheduler_down!CRITICAL- nova-scheduler services are down on certain hosts!OK- nova-scheduler services are up on all hosts\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description Service_nova-scheduler\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_vm_vcpu_usage_high!CRITICAL- vcpu usage for openstack VMs is more than 80 percent of available!OK- Openstack VMs vcpu usage is less than 80 percent of available.\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description OS-Total-Quota_VCPU-usage\n            use notifying_service\n          }\n\n          define service {\n            check_command 
check_prom_alert!os_vm_ram_usage_high!CRITICAL- RAM usage for openstack VMs is more than 80 percent of available!OK- Openstack VMs RAM usage is less than 80 percent of available.\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description OS-Total-Quota_RAM-usage\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!os_vm_disk_usage_high!CRITICAL- Disk usage for openstack VMs is more than 80 percent of available!OK- Openstack VMs Disk usage is less than 80 percent of available.\n            check_interval 60\n            hostgroup_name prometheus-hosts\n            service_description OS-Total-Quota_Disk-usage\n            use notifying_service\n          }\n\n          define service {\n            check_command check_prom_alert!prom_exporter_openstack_unavailable!CRITICAL- Openstack exporter is not collecting metrics for alerting!OK- Openstack exporter metrics are available.\n            hostgroup_name prometheus-hosts\n            service_description Prometheus-exporter_Openstack\n            use generic-service\n          }\n...\n"
  },
  {
    "path": "values_overrides/nagios/postgresql-objects.yaml",
    "content": "---\nconf:\n  nagios:\n    objects:\n      postgresql:\n        template: |\n          define service {\n            check_command check_prom_alert!prom_exporter_postgresql_unavailable!CRITICAL- Postgresql exporter is not collecting metrics for alerting!OK- Postgresql exporter metrics are available.\n            hostgroup_name prometheus-hosts\n            service_description Prometheus-exporter_Postgresql\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!pg_replication_fallen_behind!CRITICAL- Postgres Replication lag is over 2 minutes!OK- postgresql replication lag is nominal.\n            hostgroup_name prometheus-hosts\n            service_description Postgresql_replication-lag\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!pg_connections_too_high!CRITICAL- Postgres has more than 95% of available connections in use.!OK- postgresql open connections are within bounds.\n            hostgroup_name prometheus-hosts\n            service_description Postgresql_connections\n            use generic-service\n          }\n\n          define service {\n            check_command check_prom_alert!pg_deadlocks_detected!CRITICAL- Postgres server is experiencing deadlocks!OK- postgresql is not showing any deadlocks.\n            hostgroup_name prometheus-hosts\n            service_description Postgresql_deadlocks\n            use generic-service\n          }\n...\n"
  },
  {
    "path": "values_overrides/nagios/tls.yaml",
    "content": "---\nendpoints:\n  monitoring:\n    scheme:\n      default: \"https\"\n    port:\n      http:\n        default: 443\n  elasticsearch:\n    scheme:\n      default: \"https\"\n    port:\n      http:\n        default: 443\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/neutron/2024.2-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    db_init: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_service: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    neutron_db_sync: \"quay.io/airshipit/neutron:2024.2-ubuntu_jammy\"\n    neutron_dhcp: \"quay.io/airshipit/neutron:2024.2-ubuntu_jammy\"\n    neutron_l3: \"quay.io/airshipit/neutron:2024.2-ubuntu_jammy\"\n    neutron_l2gw: \"quay.io/airshipit/neutron:2024.2-ubuntu_jammy\"\n    neutron_linuxbridge_agent: \"quay.io/airshipit/neutron:2024.2-ubuntu_jammy\"\n    neutron_metadata: \"quay.io/airshipit/neutron:2024.2-ubuntu_jammy\"\n    neutron_ovn_metadata: \"quay.io/airshipit/neutron:2024.2-ubuntu_jammy\"\n    neutron_openvswitch_agent: \"quay.io/airshipit/neutron:2024.2-ubuntu_jammy\"\n    neutron_server: \"quay.io/airshipit/neutron:2024.2-ubuntu_jammy\"\n    neutron_rpc_server: \"quay.io/airshipit/neutron:2024.2-ubuntu_jammy\"\n    neutron_bagpipe_bgp: \"quay.io/airshipit/neutron:2024.2-ubuntu_jammy\"\n    neutron_netns_cleanup_cron: \"quay.io/airshipit/neutron:2024.2-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/neutron/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    neutron_db_sync: \"quay.io/airshipit/neutron:2025.1-ubuntu_jammy\"\n    neutron_dhcp: \"quay.io/airshipit/neutron:2025.1-ubuntu_jammy\"\n    neutron_l3: \"quay.io/airshipit/neutron:2025.1-ubuntu_jammy\"\n    neutron_l2gw: \"quay.io/airshipit/neutron:2025.1-ubuntu_jammy\"\n    neutron_linuxbridge_agent: \"quay.io/airshipit/neutron:2025.1-ubuntu_jammy\"\n    neutron_metadata: \"quay.io/airshipit/neutron:2025.1-ubuntu_jammy\"\n    neutron_ovn_metadata: \"quay.io/airshipit/neutron:2025.1-ubuntu_jammy\"\n    neutron_openvswitch_agent: \"quay.io/airshipit/neutron:2025.1-ubuntu_jammy\"\n    neutron_server: \"quay.io/airshipit/neutron:2025.1-ubuntu_jammy\"\n    neutron_rpc_server: \"quay.io/airshipit/neutron:2025.1-ubuntu_jammy\"\n    neutron_bagpipe_bgp: \"quay.io/airshipit/neutron:2025.1-ubuntu_jammy\"\n    neutron_netns_cleanup_cron: \"quay.io/airshipit/neutron:2025.1-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/neutron/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    neutron_db_sync: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble\"\n    neutron_dhcp: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble\"\n    neutron_l3: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble\"\n    neutron_l2gw: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble\"\n    neutron_linuxbridge_agent: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble\"\n    neutron_metadata: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble\"\n    neutron_ovn_metadata: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble\"\n    neutron_openvswitch_agent: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble\"\n    neutron_server: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble\"\n    neutron_rpc_server: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble\"\n    neutron_bagpipe_bgp: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble\"\n    neutron_netns_cleanup_cron: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble\"\n...\n"
  },
  {
    "path": "values_overrides/neutron/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    neutron_db_sync: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble\"\n    neutron_dhcp: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble\"\n    neutron_l3: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble\"\n    neutron_l2gw: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble\"\n    neutron_linuxbridge_agent: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble\"\n    neutron_metadata: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble\"\n    neutron_ovn_metadata: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble\"\n    neutron_openvswitch_agent: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble\"\n    neutron_server: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble\"\n    neutron_rpc_server: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble\"\n    neutron_bagpipe_bgp: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble\"\n    neutron_netns_cleanup_cron: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble\"\n...\n"
  },
  {
    "path": "values_overrides/neutron/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    neutron_server:\n      another.tld/foo: \"bar\"\n    neutron_rpc_server:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      neutron:\n        custom.tld/key: \"value\"\n    tls:\n      network_server_public:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/neutron/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    neutron_dhcp_agent:\n      container:\n        neutron_dhcp_agent:\n          appArmorProfile:\n            type: RuntimeDefault\n        neutron_dhcp_agent_init:\n          appArmorProfile:\n            type: RuntimeDefault\n    neutron_l3_agent:\n      container:\n        neutron_l3_agent:\n          appArmorProfile:\n            type: RuntimeDefault\n        neutron_l3_agent_init:\n          appArmorProfile:\n            type: RuntimeDefault\n    neutron_lb_agent:\n      container:\n        neutron_lb_agent:\n          appArmorProfile:\n            type: RuntimeDefault\n        neutron_lb_agent_init:\n          appArmorProfile:\n            type: RuntimeDefault\n        neutron_lb_agent_kernel_modules:\n          appArmorProfile:\n            type: RuntimeDefault\n    neutron_metadata_agent:\n      container:\n        neutron_metadata_agent_init:\n          appArmorProfile:\n            type: RuntimeDefault\n    neutron_ovs_agent:\n      container:\n        neutron_ovs_agent:\n          appArmorProfile:\n            type: RuntimeDefault\n        neutron_openvswitch_agent_kernel_modules:\n          appArmorProfile:\n            type: RuntimeDefault\n        neutron_ovs_agent_init:\n          appArmorProfile:\n            type: RuntimeDefault\n        netoffload:\n          appArmorProfile:\n            type: RuntimeDefault\n    neutron_sriov_agent:\n      container:\n        neutron_sriov_agent:\n          appArmorProfile:\n            type: RuntimeDefault\n        neutron_sriov_agent_init:\n          appArmorProfile:\n            type: RuntimeDefault\n    neutron_netns_cleanup_cron:\n      container:\n        neutron_netns_cleanup_cron:\n          appArmorProfile:\n            type: RuntimeDefault\n    neutron_server:\n      container:\n        neutron_server:\n          appArmorProfile:\n            type: RuntimeDefault\n        nginx:\n          appArmorProfile:\n            type: RuntimeDefault\n    
neutron_rpc_server:\n      container:\n        neutron_rpc_server:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/neutron/bagpipe_bgp.yaml",
    "content": "---\nconf:\n  neutron:\n    DEFAULT:\n      service_plugins: router, bgpvpn\n    service_providers:\n      service_provider: BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.service_drivers.bagpipe.bagpipe_v2.BaGPipeBGPVPNDriver:default\n  plugins:\n    openvswitch_agent:\n      agent:\n        extensions: bagpipe_bgpvpn\n  bagpipe_bgp:\n    bgp:\n      local_address: 192.168.143.88  # IP address for mpls/gre tunnels\n      peers: 192.168.143.96  # IP addresses of BGP peers\n      my_as: 23242  # Autonomous System number\n      enable_rtc: true  # Enable RT Constraint (RFC4684)\n    common:\n      root_helper: sudo /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf\n    api:\n      host: localhost\n      port: 8082\n    dataplane_driver_ipvpn:\n      dataplane_driver: ovs\n      ovs_bridge: br-mpls\n      mpls_interface: '*gre*'\n      proxy_arp: false\n  auto_bridge_add:\n    br-mpls: null\n\nmanifests:\n  daemonset_bagpipe_bgp: true\n...\n"
  },
  {
    "path": "values_overrides/neutron/dpdk-bond.yaml",
    "content": "---\nnetwork:\n  interface:\n    tunnel: br-phy-bond0\nconf:\n  plugins:\n    openvswitch_agent:\n      agent:\n        tunnel_types: vxlan\n      ovs:\n        bridge_mappings: public:br-ex\n        datapath_type: netdev\n        vhostuser_socket_dir: /var/run/openvswitch/vhostuser\n  ovs_dpdk:\n    enabled: true\n    driver: uio_pci_generic\n    nics: []\n    bonds:\n      # CHANGE-ME: modify below parameters according to your hardware\n      - name: dpdkbond0\n        bridge: br-phy-bond0\n        # The IP from the first nic in nics list shall be used\n        migrate_ip: true\n        ovs_options: \"bond_mode=active-backup\"\n        nics:\n          - name: dpdk_b0s0\n            pci_id: '0000:00:05.0'\n          - name: dpdk_b0s1\n            pci_id: '0000:00:06.0'\n    bridges:\n      - name: br-phy-bond0\n...\n"
  },
  {
    "path": "values_overrides/neutron/dpdk.yaml",
    "content": "---\nnetwork:\n  interface:\n    tunnel: null\nconf:\n  plugins:\n    openvswitch_agent:\n      agent:\n        tunnel_types: vxlan\n      ovs:\n        bridge_mappings: public:br-ex\n        datapath_type: netdev\n        vhostuser_socket_dir: /var/run/openvswitch/vhostuser\n  ovs_dpdk:\n    enabled: true\n    driver: uio_pci_generic\n    nics: []\n      # CHANGE-ME: modify pci_id according to your hardware\n      # - name: dpdk0\n      #   pci_id: '0000:05:00.0'\n      #   bridge: br-tun\n      #   migrate_ip: true\n    bridges:\n      - name: br-tun\n    bonds: []\n...\n"
  },
  {
    "path": "values_overrides/neutron/gate.yaml",
    "content": "---\nnetwork:\n  interface:\n    tunnel: docker0\nconf:\n  neutron:\n    DEFAULT:\n      l3_ha: False\n      max_l3_agents_per_router: 1\n      l3_ha_network_type: vxlan\n      dhcp_agents_per_network: 1\n  plugins:\n    ml2_conf:\n      ml2_type_flat:\n        flat_networks: public\n    openvswitch_agent:\n      agent:\n        tunnel_types: vxlan\n      ovs:\n        bridge_mappings: public:br-ex\n    linuxbridge_agent:\n      linux_bridge:\n        bridge_mappings: public:br-ex\n...\n"
  },
  {
    "path": "values_overrides/neutron/gateway.yaml",
    "content": "# Gateway API overrides for Neutron.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  network:\n    host_fqdn_override:\n      public:\n        host: neutron.openstack-helm.org\n\nmanifests:\n  ingress_server: false\n  service_ingress_server: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: neutron-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.network.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: neutron-server\n              port: 9696\n...\n"
  },
  {
    "path": "values_overrides/neutron/l2gateway.yaml",
    "content": "---\nconf:\n  neutron:\n    DEFAULT:\n      service_plugins: router, networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin\n  plugins:\n    l2gateway:\n      DEFAULT:\n        quota_l2_gateway: 10\n        periodic_monitoring_interval: 5\n      service_providers:\n        service_provider: L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.rpc_l2gw.L2gwRpcDriver:default\n  l2gateway_agent:\n    DEFAULT:\n      debug: false\n    ovsdb:\n      # <ovsdb_name>:<ip address>:<port>[,<ovsdb_name>:<ip address>:<port>]\n      # - ovsdb_name: a symbolic name that helps identifies keys and certificate files\n      # - ip address: the address or dns name for the ovsdb server\n      # - port: the port (ssl is supported)\n      ovsdb_hosts: ovsdbx:127.0.0.1:6632\n      socket_timeout: 30\n\nmanifests:\n  daemonset_l2gw_agent: true\n...\n"
  },
  {
    "path": "values_overrides/neutron/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    neutron_db_sync: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble_loci\"\n    neutron_dhcp: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble_loci\"\n    neutron_l3: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble_loci\"\n    neutron_l2gw: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble_loci\"\n    neutron_linuxbridge_agent: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble_loci\"\n    neutron_metadata: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble_loci\"\n    neutron_ovn_metadata: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble_loci\"\n    neutron_openvswitch_agent: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble_loci\"\n    neutron_server: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble_loci\"\n    neutron_rpc_server: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble_loci\"\n    neutron_bagpipe_bgp: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble_loci\"\n    neutron_netns_cleanup_cron: \"quay.io/airshipit/neutron:2025.1-ubuntu_noble_loci\"\n...\n"
  },
  {
    "path": "values_overrides/neutron/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    neutron_db_sync: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble_loci\"\n    neutron_dhcp: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble_loci\"\n    neutron_l3: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble_loci\"\n    neutron_l2gw: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble_loci\"\n    neutron_linuxbridge_agent: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble_loci\"\n    neutron_metadata: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble_loci\"\n    neutron_ovn_metadata: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble_loci\"\n    neutron_openvswitch_agent: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble_loci\"\n    neutron_server: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble_loci\"\n    neutron_rpc_server: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble_loci\"\n    neutron_bagpipe_bgp: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble_loci\"\n    neutron_netns_cleanup_cron: \"quay.io/airshipit/neutron:2025.2-ubuntu_noble_loci\"\n...\n"
  },
  {
    "path": "values_overrides/neutron/mariadb-operator.yaml",
    "content": "---\nconf:\n  neutron:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  neutron_api:\n    - neutron-db-conn\n  neutron_db_sync:\n    - neutron-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: neutron\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: neutron\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: neutron-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: neutron-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"neutron\"\n      table: \"*\"\n      username: neutron\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: neutron-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: neutron\n      passwordSecretKeyRef:\n        name: neutron-db-password\n        key: password\n      database: neutron\n      secretName: neutron-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        
interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/neutron/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\nnetwork_policy:\n  neutron:\n    egress:\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/32\n        ports:\n          - protocol: TCP\n            port: %%%REPLACE_API_PORT%%%\n...\n"
  },
  {
    "path": "values_overrides/neutron/ovn.yaml",
    "content": "---\nnetwork:\n  backend:\n    - ovn\n\nconf:\n  neutron:\n    DEFAULT:\n      router_distributed: True\n      service_plugins: ovn-router\n      l3_ha_network_type: geneve\n  plugins:\n    ml2_conf:\n      ml2:\n        extension_drivers: port_security\n        type_drivers: flat,vxlan,geneve\n        tenant_network_types: geneve\n      ovn:\n        ovn_l3_scheduler: leastloaded\n        dns_servers: 8.8.8.8,1.1.1.1\n        neutron_sync_mode: repair\n\nmanifests:\n  daemonset_dhcp_agent: false\n  daemonset_l3_agent: false\n  daemonset_metadata_agent: false\n  daemonset_netns_cleanup_cron: false\n  daemonset_ovs_agent: false\n  deployment_rpc_server: false\n\n  daemonset_ovn_metadata_agent: true\n...\n"
  },
  {
    "path": "values_overrides/neutron/ovn_vpn.yaml",
    "content": "---\nnetwork:\n  backend:\n    - ovn\n\nconf:\n  neutron:\n    DEFAULT:\n      router_distributed: true\n      service_plugins: ovn-router,ovn-vpnaas\n      l3_ha_network_type: geneve\n  ovn_vpn_agent:\n    service_providers:\n      service_provider: VPN:strongswan:neutron_vpnaas.services.vpn.service_drivers.ovn_ipsec.IPsecOvnVPNDriver:default\n  plugins:\n    ml2_conf:\n      ml2:\n        extension_drivers: port_security\n        type_drivers: flat,vxlan,geneve\n        tenant_network_types: geneve\n      ovn:\n        ovn_l3_scheduler: leastloaded\n        dns_servers: 8.8.8.8,1.1.1.1\n        neutron_sync_mode: repair\n\nmanifests:\n  daemonset_dhcp_agent: false\n  daemonset_l3_agent: false\n  daemonset_metadata_agent: false\n  daemonset_netns_cleanup_cron: false\n  daemonset_ovs_agent: false\n  deployment_rpc_server: false\n\n  daemonset_ovn_metadata_agent: true\n  daemonset_ovn_vpn_agent: true\n...\n"
  },
  {
    "path": "values_overrides/neutron/rabbitmq4.yaml",
    "content": "---\n# Upgrading from rabbitmq 3.x to 4.x requires:\n# 1: upgrading to the latest rabbitmq 3.x release and enabling all feature flags\n# 2: removing all rabbitmq 3.x openstack vhost ha policies\n# 3: setting rabbit_ha_queues to false in all openstack component configs\n# 4: wiping the rabbitmq database if rabbit_ha_queues and/or vhost ha policies were used with 3.x\nconf:\n  neutron:\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: false\n\n# Note: rabbit_ha_queues is true by default for all openstack components in openstack-helm\n\n# Steps to wipe rabbitmq database:\n# 1: rabbitmqctl stop_app\n# 2: rabbitmqctl force_reset\n# 3: rabbitmqctl start_app\n# 4: rerun all openstack component rabbit-init jobs to recreate rabbitmq vhosts and users\n\n# Note: rabbitmq classic v2 vs quorum queues\n# With rabbitmq 4.x classic queues have been replaced with classic v2 queues. Classic v2 queues\n# do not support high availability. For HA, quorum queues must be used. Quorum queues are HA by default.\n# Classic v2 queues are the default in Rabbitmq 4.x.\n#\n# To enable quorum queues with rabbitmq 4.x you can use:\n#\n# conf:\n#   neutron:\n#     oslo_messaging_rabbit:\n#       rabbit_ha_queues: false\n#       rabbit_quorum_queues: true\n#       rabbit_transient_quorum_queue: true\n#       use_queue_manager: true\n...\n"
  },
  {
    "path": "values_overrides/neutron/shared-sriov-ovs-dpdk-bond.yaml",
    "content": "---\nnetwork:\n  interface:\n    sriov:\n      - device: enp3s0f0\n        num_vfs: 32\n        promisc: false\n      - device: enp66s0f1\n        num_vfs: 32\n        promisc: false\n    tunnel: br-phy-bond0\n  backend:\n    - openvswitch\n    - sriov\nconf:\n  auto_bridge_add:\n    br-ex: null\n  neutron:\n    DEFAULT:\n      l3_ha: False\n      max_l3_agents_per_router: 1\n      l3_ha_network_type: vxlan\n      dhcp_agents_per_network: 1\n      service_plugins: router\n  plugins:\n    ml2_conf:\n      ml2:\n        mechanism_drivers: l2population,openvswitch,sriovnicswitch\n        type_drivers: vlan,flat,vxlan\n        tenant_network_types: vxlan\n      ml2_type_flat:\n        flat_networks: public\n      ml2_type_vlan:\n        network_vlan_ranges: ovsnet:2:4094,sriovnet1:100:4000,sriovnet2:100:4000\n    openvswitch_agent:\n      default:\n        ovs_vsctl_timeout: 30\n      agent:\n        tunnel_types: vxlan\n      securitygroup:\n        enable_security_group: False\n        firewall_driver: neutron.agent.firewall.NoopFirewallDriver\n      ovs:\n        bridge_mappings: public:br-ex,ovsnet:br-phy-bond0\n        datapath_type: netdev\n        vhostuser_socket_dir: /var/run/openvswitch/vhostuser\n        of_connect_timeout: 60\n        of_request_timeout: 30\n    sriov_agent:\n      securitygroup:\n        firewall_driver: neutron.agent.firewall.NoopFirewallDriver\n      sriov_nic:\n        physical_device_mappings: sriovnet1:enp3s0f0,sriovnet2:enp66s0f1\n        exclude_devices: enp3s0f0:0000:00:05.1,enp66s0f1:0000:00:06.1\n  ovs_dpdk:\n    enabled: true\n    driver: uio_pci_generic\n    nics: []\n    bonds:\n      # CHANGE-ME: modify below parameters according to your hardware\n      - name: dpdkbond0\n        bridge: br-phy-bond0\n        mtu: 9000\n        # The IP from the first nic in nics list shall be used\n        migrate_ip: true\n        n_rxq: 2\n        n_rxq_size: 1024\n        n_txq_size: 1024\n        ovs_options: 
\"bond_mode=active-backup\"\n        nics:\n          - name: dpdk_b0s0\n            pci_id: '0000:00:05.0'\n            vf_index: 0\n          - name: dpdk_b0s1\n            pci_id: '0000:00:06.0'\n            vf_index: 0\n    bridges:\n      - name: br-phy-bond0\n    modules:\n      - name: dpdk\n        log_level: info\n\n# In case of shared profile (sriov + ovs-dpdk), sriov agent should finish\n# first so as to let it configure the SRIOV VFs before ovs-agent tries to\n# bind it with DPDK driver.\ndependencies:\n  dynamic:\n    targeted:\n      openvswitch:\n        ovs_agent:\n          pod:\n            - requireSameNode: true\n              labels:\n                application: neutron\n                component: neutron-sriov-agent\n...\n"
  },
  {
    "path": "values_overrides/neutron/tls-offloading.yaml",
    "content": "---\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      neutron:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      test:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n\ntls:\n  identity: true\n...\n"
  },
  {
    "path": "values_overrides/neutron/tls.yaml",
    "content": "---\nimages:\n  tags:\n    nginx: docker.io/nginx:1.18.0\nnetwork:\n  server:\n    ingress:\n      annotations:\n        nginx.ingress.kubernetes.io/backend-protocol: \"https\"\npod:\n  security_context:\n    neutron_server:\n      pod:\n        runAsUser: 0\n      container:\n        neutron_server:\n          readOnlyRootFilesystem: false\n    neutron_rpc_server:\n      pod:\n        runAsUser: 0\n      container:\n        neutron_rpc_server:\n          readOnlyRootFilesystem: false\n  resources:\n    nginx:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\nconf:\n  neutron_api_uwsgi:\n    uwsgi:\n      http-socket: 127.0.0.1:9696\n  nginx: |\n    worker_processes 1;\n    daemon off;\n    user nginx;\n\n    events {\n      worker_connections 1024;\n    }\n\n    http {\n      include /etc/nginx/mime.types;\n      default_type application/octet-stream;\n\n      sendfile on;\n      keepalive_timeout 65s;\n      tcp_nodelay on;\n\n      log_format main '[nginx] method=$request_method path=$request_uri '\n                      'status=$status upstream_status=$upstream_status duration=$request_time size=$body_bytes_sent '\n                      '\"$remote_user\" \"$http_referer\" \"$http_user_agent\"';\n\n      access_log /dev/stdout  main;\n\n      upstream websocket {\n        server 127.0.0.1:$PORT;\n      }\n\n      server {\n        server_name {{ printf \"%s.%s.svc.%s\" \"${SHORTNAME}\" .Release.Namespace .Values.endpoints.cluster_domain_suffix }};\n        listen $POD_IP:$PORT ssl;\n\n        client_max_body_size  0;\n\n        ssl_certificate      /etc/nginx/certs/tls.crt;\n        ssl_certificate_key  /etc/nginx/certs/tls.key;\n        ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384;\n\n        location / {\n          proxy_pass_request_headers on;\n\n          
proxy_http_version  1.1;\n          proxy_pass          http://websocket;\n          proxy_read_timeout  90;\n        }\n      }\n    }\n  neutron:\n    DEFAULT:\n      bind_host: 127.0.0.1\n    nova:\n      cafile: /etc/neutron/certs/ca.crt\n    keystone_authtoken:\n      cafile: /etc/neutron/certs/ca.crt\n    oslo_messaging_rabbit:\n      ssl: true\n      ssl_ca_file: /etc/rabbitmq/certs/ca.crt\n      ssl_cert_file: /etc/rabbitmq/certs/tls.crt\n      ssl_key_file: /etc/rabbitmq/certs/tls.key\n  metadata_agent:\n    DEFAULT:\n      auth_ca_cert: /etc/ssl/certs/openstack-helm.crt\n      nova_metadata_port: 443\n      nova_metadata_protocol: https\nendpoints:\n  compute:\n    scheme:\n      default: https\n    port:\n      api:\n        public: 443\n  compute_metadata:\n    hosts:\n      default: metadata\n    scheme:\n      default: https\n    port:\n      metadata:\n        default: 443\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      neutron:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      nova:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      test:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n    scheme:\n      default: https\n    port:\n      api:\n        default: 443\n  network:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: neutron-tls-server\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: https\n    port:\n      api:\n        public: 443\n  ingress:\n    port:\n      ingress:\n        default: 443\n  oslo_messaging:\n    port:\n      https:\n        default: 15680\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/nova/2024.2-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    db_init: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_service: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    nova_api: \"quay.io/airshipit/nova:2024.2-ubuntu_jammy\"\n    nova_cell_setup: \"quay.io/airshipit/nova:2024.2-ubuntu_jammy\"\n    nova_cell_setup_init: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    nova_compute: \"quay.io/airshipit/nova:2024.2-ubuntu_jammy\"\n    nova_compute_ssh: \"quay.io/airshipit/nova:2024.2-ubuntu_jammy\"\n    nova_conductor: \"quay.io/airshipit/nova:2024.2-ubuntu_jammy\"\n    nova_db_sync: \"quay.io/airshipit/nova:2024.2-ubuntu_jammy\"\n    nova_novncproxy: \"quay.io/airshipit/nova:2024.2-ubuntu_jammy\"\n    nova_novncproxy_assets: \"quay.io/airshipit/nova:2024.2-ubuntu_jammy\"\n    nova_scheduler: \"quay.io/airshipit/nova:2024.2-ubuntu_jammy\"\n    nova_spiceproxy: \"quay.io/airshipit/nova:2024.2-ubuntu_jammy\"\n    nova_spiceproxy_assets: \"quay.io/airshipit/nova:2024.2-ubuntu_jammy\"\n    nova_service_cleaner: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/nova/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    nova_api: \"quay.io/airshipit/nova:2025.1-ubuntu_jammy\"\n    nova_cell_setup: \"quay.io/airshipit/nova:2025.1-ubuntu_jammy\"\n    nova_cell_setup_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    nova_compute: \"quay.io/airshipit/nova:2025.1-ubuntu_jammy\"\n    nova_compute_ssh: \"quay.io/airshipit/nova:2025.1-ubuntu_jammy\"\n    nova_conductor: \"quay.io/airshipit/nova:2025.1-ubuntu_jammy\"\n    nova_db_sync: \"quay.io/airshipit/nova:2025.1-ubuntu_jammy\"\n    nova_novncproxy: \"quay.io/airshipit/nova:2025.1-ubuntu_jammy\"\n    nova_novncproxy_assets: \"quay.io/airshipit/nova:2025.1-ubuntu_jammy\"\n    nova_scheduler: \"quay.io/airshipit/nova:2025.1-ubuntu_jammy\"\n    nova_spiceproxy: \"quay.io/airshipit/nova:2025.1-ubuntu_jammy\"\n    nova_spiceproxy_assets: \"quay.io/airshipit/nova:2025.1-ubuntu_jammy\"\n    nova_service_cleaner: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/nova/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    nova_api: \"quay.io/airshipit/nova:2025.1-ubuntu_noble\"\n    nova_cell_setup: \"quay.io/airshipit/nova:2025.1-ubuntu_noble\"\n    nova_cell_setup_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    nova_compute: \"quay.io/airshipit/nova:2025.1-ubuntu_noble\"\n    nova_compute_ssh: \"quay.io/airshipit/nova:2025.1-ubuntu_noble\"\n    nova_conductor: \"quay.io/airshipit/nova:2025.1-ubuntu_noble\"\n    nova_db_sync: \"quay.io/airshipit/nova:2025.1-ubuntu_noble\"\n    nova_novncproxy: \"quay.io/airshipit/nova:2025.1-ubuntu_noble\"\n    nova_novncproxy_assets: \"quay.io/airshipit/nova:2025.1-ubuntu_noble\"\n    nova_scheduler: \"quay.io/airshipit/nova:2025.1-ubuntu_noble\"\n    nova_spiceproxy: \"quay.io/airshipit/nova:2025.1-ubuntu_noble\"\n    nova_spiceproxy_assets: \"quay.io/airshipit/nova:2025.1-ubuntu_noble\"\n    nova_service_cleaner: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/nova/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    nova_api: \"quay.io/airshipit/nova:2025.2-ubuntu_noble\"\n    nova_cell_setup: \"quay.io/airshipit/nova:2025.2-ubuntu_noble\"\n    nova_cell_setup_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    nova_compute: \"quay.io/airshipit/nova:2025.2-ubuntu_noble\"\n    nova_compute_ssh: \"quay.io/airshipit/nova:2025.2-ubuntu_noble\"\n    nova_conductor: \"quay.io/airshipit/nova:2025.2-ubuntu_noble\"\n    nova_db_sync: \"quay.io/airshipit/nova:2025.2-ubuntu_noble\"\n    nova_novncproxy: \"quay.io/airshipit/nova:2025.2-ubuntu_noble\"\n    nova_novncproxy_assets: \"quay.io/airshipit/nova:2025.2-ubuntu_noble\"\n    nova_scheduler: \"quay.io/airshipit/nova:2025.2-ubuntu_noble\"\n    nova_spiceproxy: \"quay.io/airshipit/nova:2025.2-ubuntu_noble\"\n    nova_spiceproxy_assets: \"quay.io/airshipit/nova:2025.2-ubuntu_noble\"\n    nova_service_cleaner: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/nova/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    nova_api_osapi:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      nova:\n        custom.tld/key: \"value\"\n    tls:\n      compute_osapi_public:\n        custom.tld/key: \"value\"\n    ssh:\n      keys:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/nova/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    nova:\n      container:\n        nova_compute:\n          appArmorProfile:\n            type: RuntimeDefault\n        nova_compute_init:\n          appArmorProfile:\n            type: RuntimeDefault\n        nova_compute_vnc_init:\n          appArmorProfile:\n            type: RuntimeDefault\n        nova_api:\n          appArmorProfile:\n            type: RuntimeDefault\n        nova_api_metadata_init:\n          appArmorProfile:\n            type: RuntimeDefault\n        nova_osapi:\n          appArmorProfile:\n            type: RuntimeDefault\n        nova_conductor:\n          appArmorProfile:\n            type: RuntimeDefault\n        nova_novncproxy:\n          appArmorProfile:\n            type: RuntimeDefault\n        nova_novncproxy_init_assets:\n          appArmorProfile:\n            type: RuntimeDefault\n        nova_novncproxy_init:\n          appArmorProfile:\n            type: RuntimeDefault\n        nova_scheduler:\n          appArmorProfile:\n            type: RuntimeDefault\n    nova_cell_setup:\n      container:\n        nova_cell_setup:\n          appArmorProfile:\n            type: RuntimeDefault\n        nova_cell_setup_init:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/nova/cntt.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n---\nconf:\n  nova:\n    DEFAULT:\n      reserved_huge_pages:\n        type: multistring\n        values:\n          - node:0,size:1GB,count:4\n          - node:1,size:1GB,count:4\n      reserved_host_memory_mb: 512\n...\n"
  },
  {
    "path": "values_overrides/nova/dpdk.yaml",
    "content": "---\nconf:\n  nova:\n    libvirt:\n      virt_type: kvm\n      cpu_mode: host-model\n...\n"
  },
  {
    "path": "values_overrides/nova/gateway.yaml",
    "content": "# Gateway API overrides for Nova.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  compute:\n    host_fqdn_override:\n      public:\n        host: nova.openstack-helm.org\n  compute_metadata:\n    host_fqdn_override:\n      public:\n        host: metadata.openstack-helm.org\n  compute_novnc_proxy:\n    host_fqdn_override:\n      public:\n        host: novncproxy.openstack-helm.org\n  compute_serial_proxy:\n    host_fqdn_override:\n      public:\n        host: serialproxy.openstack-helm.org\n  compute_spice_proxy:\n    host_fqdn_override:\n      public:\n        host: spiceproxy.openstack-helm.org\n\nmanifests:\n  ingress_metadata: false\n  ingress_novncproxy: false\n  ingress_serialproxy: false\n  ingress_spiceproxy: false\n  ingress_osapi: false\n  service_ingress_metadata: false\n  service_ingress_novncproxy: false\n  service_ingress_serialproxy: false\n  service_ingress_spiceproxy: false\n  service_ingress_osapi: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: nova-route-osapi\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.compute.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: nova-api\n              port: 8774\n\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: nova-route-metadata\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.compute_metadata.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: 
gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: nova-metadata\n              port: 8775\n\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: nova-route-novncproxy\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.compute_novnc_proxy.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: nova-novncproxy\n              port: 6080\n\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: nova-route-serialproxy\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.compute_serial_proxy.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: nova-serialproxy\n              port: 6083\n\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: nova-route-spiceproxy\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.compute_spice_proxy.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: nova-spiceproxy\n              port: 6082\n...\n"
  },
  {
    "path": "values_overrides/nova/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    nova_api: \"quay.io/airshipit/nova:2025.1-ubuntu_noble_loci\"\n    nova_cell_setup: \"quay.io/airshipit/nova:2025.1-ubuntu_noble_loci\"\n    nova_cell_setup_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    nova_compute: \"quay.io/airshipit/nova:2025.1-ubuntu_noble_loci\"\n    nova_compute_ssh: \"quay.io/airshipit/nova:2025.1-ubuntu_noble_loci\"\n    nova_conductor: \"quay.io/airshipit/nova:2025.1-ubuntu_noble_loci\"\n    nova_db_sync: \"quay.io/airshipit/nova:2025.1-ubuntu_noble_loci\"\n    nova_novncproxy: \"quay.io/airshipit/nova:2025.1-ubuntu_noble_loci\"\n    nova_novncproxy_assets: \"quay.io/airshipit/nova:2025.1-ubuntu_noble_loci\"\n    nova_scheduler: \"quay.io/airshipit/nova:2025.1-ubuntu_noble_loci\"\n    nova_spiceproxy: \"quay.io/airshipit/nova:2025.1-ubuntu_noble_loci\"\n    nova_spiceproxy_assets: \"quay.io/airshipit/nova:2025.1-ubuntu_noble_loci\"\n    nova_service_cleaner: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/nova/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    nova_api: \"quay.io/airshipit/nova:2025.2-ubuntu_noble_loci\"\n    nova_cell_setup: \"quay.io/airshipit/nova:2025.2-ubuntu_noble_loci\"\n    nova_cell_setup_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    nova_compute: \"quay.io/airshipit/nova:2025.2-ubuntu_noble_loci\"\n    nova_compute_ssh: \"quay.io/airshipit/nova:2025.2-ubuntu_noble_loci\"\n    nova_conductor: \"quay.io/airshipit/nova:2025.2-ubuntu_noble_loci\"\n    nova_db_sync: \"quay.io/airshipit/nova:2025.2-ubuntu_noble_loci\"\n    nova_novncproxy: \"quay.io/airshipit/nova:2025.2-ubuntu_noble_loci\"\n    nova_novncproxy_assets: \"quay.io/airshipit/nova:2025.2-ubuntu_noble_loci\"\n    nova_scheduler: \"quay.io/airshipit/nova:2025.2-ubuntu_noble_loci\"\n    nova_spiceproxy: \"quay.io/airshipit/nova:2025.2-ubuntu_noble_loci\"\n    nova_spiceproxy_assets: \"quay.io/airshipit/nova:2025.2-ubuntu_noble_loci\"\n    nova_service_cleaner: \"quay.io/airshipit/ceph-config-helper:latest-ubuntu_jammy\"\n...\n"
  },
  {
    "path": "values_overrides/nova/mariadb-operator.yaml",
    "content": "---\nconf:\n  nova:\n    database:\n      connection: null\n    api_database:\n      connection: null\n    cell0_database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n  secret_db_api: false\n  secret_db_cell0: false\n\netcSources:\n  nova_api_osapi:\n    - nova-db-conn\n    - nova-api-db-conn\n    - nova-cell0-db-conn\n  nova_api_metadata:\n    - nova-db-conn\n    - nova-api-db-conn\n    - nova-cell0-db-conn\n  nova_conductor:\n    - nova-db-conn\n    - nova-api-db-conn\n    - nova-cell0-db-conn\n  nova_scheduler:\n    - nova-db-conn\n    - nova-api-db-conn\n    - nova-cell0-db-conn\n  nova_cell_setup:\n    - nova-db-conn\n    - nova-api-db-conn\n    - nova-cell0-db-conn\n  nova_db_sync:\n    - nova-db-conn\n    - nova-api-db-conn\n    - nova-cell0-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: nova\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: nova_api\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: nova_cell0\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: nova\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: nova-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: nova\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"nova\"\n      table: \"*\"\n      username: nova\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: nova-api\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"nova_api\"\n      table: \"*\"\n      username: nova\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: nova-cell0\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"nova_cell0\"\n      table: \"*\"\n      username: nova\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: nova-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: nova\n      passwordSecretKeyRef:\n        name: nova-db-password\n        key: password\n      database: nova\n      secretName: nova-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: nova-api-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: nova\n      passwordSecretKeyRef:\n        name: nova-db-password\n        key: password\n      database: nova_api\n      secretName: nova-api-db-conn\n      secretTemplate:\n        key: api_db_conn.conf\n        format: |\n          [api_database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: nova-cell0-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: nova\n      passwordSecretKeyRef:\n        name: nova-db-password\n        key: password\n      database: nova_cell0\n      secretName: nova-cell0-db-conn\n      secretTemplate:\n        key: cell0_db_conn.conf\n        format: |\n          [cell0_database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/nova/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\nnetwork_policy:\n  nova:\n    egress:\n      - to:\n        - podSelector:\n            matchLabels:\n              application: nova\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/32\n        ports:\n          - protocol: TCP\n            port: %%%REPLACE_API_PORT%%%\n...\n"
  },
  {
    "path": "values_overrides/nova/opensuse_15.yaml",
    "content": "---\nconf:\n  software:\n    apache2:\n      binary: apache2ctl\n      start_parameters: -DFOREGROUND -k start\n      site_dir: /etc/apache2/vhosts.d\n      conf_dir: /etc/apache2/conf.d\n      a2enmod:\n        - version\n  security: |\n    <Directory \"/var/www\">\n      Options Indexes FollowSymLinks\n      AllowOverride All\n      <IfModule !mod_access_compat.c>\n        Require all granted\n      </IfModule>\n      <IfModule mod_access_compat.c>\n        Order allow,deny\n        Allow from all\n      </IfModule>\n    </Directory>\n  nova:\n    DEFAULT:\n      mkisofs_cmd: mkisofs\n...\n"
  },
  {
    "path": "values_overrides/nova/ovn.yaml",
    "content": "---\nnetwork:\n  backend:\n    - ovn\n\nconf:\n  nova:\n    DEFAULT:\n      vif_plugging_is_fatal: true\n      vif_plugging_timeout: 300\n...\n"
  },
  {
    "path": "values_overrides/nova/rabbitmq4.yaml",
    "content": "---\n# Upgrading from rabbitmq 3.x to 4.x requires:\n# 1: upgrading to the latest rabbitmq 3.x release and enabling all feature flags\n# 2: removing all rabbitmq 3.x openstack vhost ha policies\n# 3: setting rabbit_ha_queues to false in all openstack component configs\n# 4: wiping the rabbitmq database if rabbit_ha_queues and/or vhost ha policies were used with 3.x\nconf:\n  nova:\n    oslo_messaging_rabbit:\n      rabbit_ha_queues: false\n\n# Note: rabbit_ha_queues is true by default for all openstack components in openstack-helm\n\n# Steps to wipe rabbitmq database:\n# 1: rabbitmqctl stop_app\n# 2: rabbitmqctl force_reset\n# 3: rabbitmqctl start_app\n# 4: rerun all openstack component rabbit-init jobs to recreate rabbitmq vhosts and users\n\n# Note: rabbitmq classic v2 vs quorum queues\n# With rabbitmq 4.x classic queues have been replaced with classic v2 queues. Classic v2 queues\n# do not support high availability. For HA, quorum queues must be used. Quorum queues are HA by default.\n# Classic v2 queues are the default in Rabbitmq 4.x.\n#\n# To enable quorum queues with rabbitmq 4.x you can use:\n#\n# conf:\n#   nova:\n#     oslo_messaging_rabbit:\n#       rabbit_ha_queues: false\n#       rabbit_quorum_queues: true\n#       rabbit_transient_quorum_queue: true\n#       use_queue_manager: true\n...\n"
  },
  {
    "path": "values_overrides/nova/ssh.yaml",
    "content": "---\nnetwork:\n  ssh:\n    enabled: true\n    public_key: |\n      ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfgGkoPxu6jVqyBTGDlhGqoFFaTymMOH3pDRzrzXCVodqrtv1heBAyi7L63+MZ+m/facDDo43hWzhFLmmMgD00AS7L+VH+oeEwKVCfq0HN3asKLadpweBQVAkGX7PzjRKF25qj6J7iVpKAf1NcnJCsWL3b+wC9mwK7TmupOmWra8BrfP7Fvek1RLx3lwk+ZZ9lUlm6o+jwXn/9rCEFa7ywkGpdrPRBNHQshGjDlJPi15boXIKxOmoZ/DszkJq7iLYQnwa4Kdb0dJ9OE/l2LLBiEpkMlTnwXA7QCS5jEHXwW78b4BOZvqrFflga+YldhDmkyRRfnhcF5Ok2zQmx9Q+t root@openstack-helm\n    private_key: |\n      -----BEGIN RSA PRIVATE KEY-----\n      MIIEpAIBAAKCAQEA34BpKD8buo1asgUxg5YRqqBRWk8pjDh96Q0c681wlaHaq7b9\n      YXgQMouy+t/jGfpv32nAw6ON4Vs4RS5pjIA9NAEuy/lR/qHhMClQn6tBzd2rCi2n\n      acHgUFQJBl+z840Shduao+ie4laSgH9TXJyQrFi92/sAvZsCu05rqTplq2vAa3z+\n      xb3pNUS8d5cJPmWfZVJZuqPo8F5//awhBWu8sJBqXaz0QTR0LIRow5ST4teW6FyC\n      sTpqGfw7M5Cau4i2EJ8GuCnW9HSfThP5diywYhKZDJU58FwO0AkuYxB18Fu/G+AT\n      mb6qxX5YGvmJXYQ5pMkUX54XBeTpNs0JsfUPrQIDAQABAoIBAFkEFd3XtL2KSxMY\n      Cm50OLkSfRRQ7yVP4qYNePVZr3uJKUS27xgA78KR7UkKHrNcEW6T+hhxbbLR2AmF\n      wLga40VxKyhGNqgJ5Vx/OAM//Ed4AAVfxYvTkfmsXqPRPiTEjRoPKvoZTh6riFHx\n      ZExAd0aNWaDhyZu6v03GoA6YmaG53CLhUpDjIEpAHT8Q5fiukvpvFNAkSpSU3wWW\n      YD14S5BTXx8Z7v5mNgbxzDIST9P6oGm9jOoMJJCxu3KVF5Xh6k23DP1wukiWNypJ\n      b7dzfE8/NZUZ15Du4g1ZXHZyOATwN+4GQi1tV+oB1o6wI6829lpIMlsmqHhrw867\n      942SmakCgYEA9R1xFEEVRavBGIUeg/NMbFP+Ssl2DljAdnmcOASCxAFqCx6y3WSK\n      P2xWTD/MCG/uz627EVp+lfbapZimm171rUMpVCqTa5tH+LZ+Lbl+rjoLwSWVqySK\n      MGyIEzpPLq5PrpGdUghZNsGAG7kgTarJM5SYyA+Esqr8AADjDrZdmzcCgYEA6W1C\n      h9nU5i04UogndbkOiDVDWn0LnjUnVDTmhgGhbJDLtx4/hte/zGK7+mKl561q3Qmm\n      xY0s8cSQCX1ULHyrgzS9rc0k42uvuRWgpKKKT5IrjiA91HtfcVM1r9hxa2/dw4wk\n      WbAoaqpadjQAKoB4PNYzRfvITkv/9O+JSyK5BjsCgYEA5p9C68momBrX3Zgyc/gQ\n      qcQFeJxAxZLf0xjs0Q/9cSnbeobxx7h3EuF9+NP1xuJ6EVDmt5crjzHp2vDboUgh\n      Y1nToutENXSurOYXpjHnbUoUETCpt5LzqkgTZ/Pu2H8NXbSIDszoE8rQHEV8jVbp\n      Y+ymK2XedrTF0cMD363aONUCgYEAy5J4+kdUL+VyADAz0awxa0KgWdNCBZivkvWL\n      
sYTMhgUFVM7xciTIZXQaIjRUIeeQkfKv2gvUDYlyYIRHm4Cih4vAfEmziQ7KMm0V\n      K1+BpgGBMLMXmS57PzblVFU8HQlzau3Wac2CgfvNZtbU6jweIFhiYP9DYl1PfQpG\n      PxuqJy8CgYBERsjdYfnyGMnFg3DVwgv/W/JspX201jMhQW2EW1OGDf7RQV+qTUnU\n      2NRGN9QbVYUvdwuRPd7C9wXQfLzXf0/E67oYg6fHHGTBNMjSq56qhZ2dSZnyQCxI\n      UZu0B4/1A5493Mypxp8c2fPhBdfzjTA5latsr75U26OMPxCxgFxm1A==\n      -----END RSA PRIVATE KEY-----\n...\n"
  },
  {
    "path": "values_overrides/nova/tls-offloading.yaml",
    "content": "---\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      nova:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      test:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n\ntls:\n  identity: true\n...\n"
  },
  {
    "path": "values_overrides/nova/tls.yaml",
    "content": "---\nnetwork:\n  osapi:\n    ingress:\n      annotations:\n        nginx.ingress.kubernetes.io/backend-protocol: \"https\"\n  metadata:\n    ingress:\n      annotations:\n        nginx.ingress.kubernetes.io/backend-protocol: \"https\"\n  novncproxy:\n    ingress:\n      annotations:\n        nginx.ingress.kubernetes.io/backend-protocol: \"https\"\nconf:\n  mpm_event: |\n    <IfModule mpm_event_module>\n      ServerLimit         1024\n      StartServers        32\n      MinSpareThreads     32\n      MaxSpareThreads     256\n      ThreadsPerChild     25\n      MaxRequestsPerChild 128\n      ThreadLimit         720\n    </IfModule>\n  wsgi_nova_api: |\n    {{- $portInt := tuple \"compute\" \"service\" \"api\" $ | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    Listen {{ $portInt }}\n    <Directory /var/lib/openstack/bin>\n        Require all granted\n    </Directory>\n    <VirtualHost *:{{ $portInt }}>\n      ServerName {{ printf \"%s.%s.svc.%s\" \"nova-api\" .Release.Namespace .Values.endpoints.cluster_domain_suffix }}\n      WSGIDaemonProcess nova-api processes=1 threads=1 user=nova display-name=%{GROUP}\n      WSGIProcessGroup nova-api\n      WSGIScriptAlias /  /var/lib/openstack/bin/nova-api-wsgi\n      WSGIApplicationGroup %{GLOBAL}\n      WSGIPassAuthorization On\n      AllowEncodedSlashes On\n      SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n      ErrorLogFormat \"%{cu}t %M\"\n      ErrorLog /dev/stdout\n      CustomLog /dev/stdout combined env=!forwarded\n      CustomLog /dev/stdout proxy env=forwarded\n\n      SSLEngine on\n      SSLCertificateFile      /etc/nova/certs/tls.crt\n      SSLCertificateKeyFile   /etc/nova/certs/tls.key\n      SSLProtocol             all -SSLv3 -TLSv1 -TLSv1.1\n      SSLCipherSuite          
ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256\n      SSLHonorCipherOrder     on\n    </VirtualHost>\n  wsgi_nova_metadata: |\n    {{- $portInt := tuple \"compute_metadata\" \"service\" \"metadata\" $ | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    Listen {{ $portInt }}\n    <Directory /var/lib/openstack/bin>\n        Require all granted\n    </Directory>\n    <VirtualHost *:{{ $portInt }}>\n      ServerName {{ printf \"%s.%s.svc.%s\" \"nova-metadata\" .Release.Namespace .Values.endpoints.cluster_domain_suffix }}\n      WSGIDaemonProcess nova-metadata processes=1 threads=1 user=nova display-name=%{GROUP}\n      WSGIProcessGroup nova-metadata\n      WSGIScriptAlias /  /var/lib/openstack/bin/nova-metadata-wsgi\n      WSGIApplicationGroup %{GLOBAL}\n      WSGIPassAuthorization On\n      AllowEncodedSlashes On\n      SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n      ErrorLogFormat \"%{cu}t %M\"\n      ErrorLog /dev/stdout\n      CustomLog /dev/stdout combined env=!forwarded\n      CustomLog /dev/stdout proxy env=forwarded\n\n      SSLEngine on\n      SSLCertificateFile      /etc/nova/certs/tls.crt\n      SSLCertificateKeyFile   /etc/nova/certs/tls.key\n      SSLProtocol             all -SSLv3 -TLSv1 -TLSv1.1\n      SSLCipherSuite          ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256\n      SSLHonorCipherOrder     on\n    </VirtualHost>\n  software:\n    apache2:\n      a2enmod:\n        - ssl\n  nova:\n    console:\n      ssl_minimum_version: tlsv1_2\n    glance:\n      cafile: 
/etc/nova/certs/ca.crt\n    ironic:\n      cafile: /etc/nova/certs/ca.crt\n    neutron:\n      cafile: /etc/nova/certs/ca.crt\n    keystone_authtoken:\n      cafile: /etc/nova/certs/ca.crt\n    cinder:\n      cafile: /etc/nova/certs/ca.crt\n    placement:\n      cafile: /etc/nova/certs/ca.crt\n    keystone:\n      cafile: /etc/nova/certs/ca.crt\n    oslo_messaging_rabbit:\n      ssl: true\n      ssl_ca_file: /etc/rabbitmq/certs/ca.crt\n      ssl_cert_file: /etc/rabbitmq/certs/tls.crt\n      ssl_key_file: /etc/rabbitmq/certs/tls.key\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      nova:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      neutron:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      placement:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      test:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n    scheme:\n      default: https\n    port:\n      api:\n        default: 443\n  image:\n    scheme:\n      default: https\n    port:\n      api:\n        public: 443\n  compute:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: nova-tls-api\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: 'https'\n      service: 'https'\n    port:\n      api:\n        public: 443\n  compute_metadata:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: metadata-tls-metadata\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: https\n    port:\n      metadata:\n        public: 443\n  compute_novnc_proxy:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: nova-novncproxy-tls-proxy\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: https\n    port:\n      novnc_proxy:\n        public: 443\n  compute_spice_proxy:\n    
host_fqdn_override:\n      default:\n        tls:\n          secretName: nova-tls-spiceproxy\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: https\n  compute_serial_proxy:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: nova-tls-serialproxy\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: wss\n  placement:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: placement-tls-api\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: https\n    port:\n      api:\n        public: 443\n  network:\n    scheme:\n      default: https\n    port:\n      api:\n        public: 443\n  oslo_messaging:\n    port:\n      https:\n        default: 15680\npod:\n  security_context:\n    nova:\n      container:\n        nova_api:\n          runAsUser: 0\n          readOnlyRootFilesystem: false\n        nova_osapi:\n          runAsUser: 0\n          readOnlyRootFilesystem: false\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/octavia/2024.2-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    bootstrap: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    octavia_db_sync: quay.io/airshipit/octavia:2024.2-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n    octavia_api: quay.io/airshipit/octavia:2024.2-ubuntu_jammy\n    octavia_driver_agent: quay.io/airshipit/octavia:2024.2-ubuntu_jammy\n    octavia_worker: quay.io/airshipit/octavia:2024.2-ubuntu_jammy\n    octavia_worker_init: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    octavia_housekeeping: quay.io/airshipit/octavia:2024.2-ubuntu_jammy\n    octavia_health_manager: quay.io/airshipit/octavia:2024.2-ubuntu_jammy\n    octavia_health_manager_init: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    openvswitch_vswitchd: quay.io/airshipit/openvswitch:latest-ubuntu_jammy\nconf:\n  octavia_api_uwsgi:\n    uwsgi:\n      wsgi-file: /var/lib/openstack/bin/octavia-wsgi\n...\n"
  },
  {
    "path": "values_overrides/octavia/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    octavia_db_sync: quay.io/airshipit/octavia:2025.1-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n    octavia_api: quay.io/airshipit/octavia:2025.1-ubuntu_jammy\n    octavia_driver_agent: quay.io/airshipit/octavia:2025.1-ubuntu_jammy\n    octavia_worker: quay.io/airshipit/octavia:2025.1-ubuntu_jammy\n    octavia_worker_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    octavia_housekeeping: quay.io/airshipit/octavia:2025.1-ubuntu_jammy\n    octavia_health_manager: quay.io/airshipit/octavia:2025.1-ubuntu_jammy\n    octavia_health_manager_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    openvswitch_vswitchd: quay.io/airshipit/openvswitch:latest-ubuntu_jammy\nconf:\n  octavia_api_uwsgi:\n    uwsgi:\n      # in 2025.2 the wsgi script was removed\n      wsgi-file: /var/lib/openstack/bin/octavia-wsgi\n...\n"
  },
  {
    "path": "values_overrides/octavia/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    octavia_db_sync: quay.io/airshipit/octavia:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble\n    image_repo_sync: docker.io/docker:17.07.0\n    octavia_api: quay.io/airshipit/octavia:2025.1-ubuntu_noble\n    octavia_driver_agent: quay.io/airshipit/octavia:2025.1-ubuntu_noble\n    octavia_worker: quay.io/airshipit/octavia:2025.1-ubuntu_noble\n    octavia_worker_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    octavia_housekeeping: quay.io/airshipit/octavia:2025.1-ubuntu_noble\n    octavia_health_manager: quay.io/airshipit/octavia:2025.1-ubuntu_noble\n    octavia_health_manager_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    openvswitch_vswitchd: quay.io/airshipit/openvswitch:latest-ubuntu_noble\nconf:\n  octavia_api_uwsgi:\n    uwsgi:\n      # in 2025.2 the wsgi script was removed\n      wsgi-file: /var/lib/openstack/bin/octavia-wsgi\n...\n"
  },
  {
    "path": "values_overrides/octavia/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    octavia_db_sync: quay.io/airshipit/octavia:2025.2-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble\n    image_repo_sync: docker.io/docker:17.07.0\n    octavia_api: quay.io/airshipit/octavia:2025.2-ubuntu_noble\n    octavia_driver_agent: quay.io/airshipit/octavia:2025.2-ubuntu_noble\n    octavia_worker: quay.io/airshipit/octavia:2025.2-ubuntu_noble\n    octavia_worker_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    octavia_housekeeping: quay.io/airshipit/octavia:2025.2-ubuntu_noble\n    octavia_health_manager: quay.io/airshipit/octavia:2025.2-ubuntu_noble\n    octavia_health_manager_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    openvswitch_vswitchd: quay.io/airshipit/openvswitch:latest-ubuntu_noble\nconf:\n  octavia_api_uwsgi:\n    uwsgi:\n      # in 2025.2 the wsgi script was removed\n      wsgi-file: /var/lib/openstack/bin/octavia-wsgi\n...\n"
  },
  {
    "path": "values_overrides/octavia/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    octavia_api:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      octavia:\n        custom.tld/key: \"value\"\n    tls:\n      load_balancer_api_public:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/octavia/gateway.yaml",
    "content": "# Gateway API overrides for Octavia.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  load_balancer:\n    host_fqdn_override:\n      public:\n        host: octavia.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: octavia-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.load_balancer.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: octavia-api\n              port: 9876\n...\n"
  },
  {
    "path": "values_overrides/octavia/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    octavia_db_sync: quay.io/airshipit/octavia:2025.1-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble\n    image_repo_sync: docker.io/docker:17.07.0\n    octavia_api: quay.io/airshipit/octavia:2025.1-ubuntu_noble_loci\n    octavia_driver_agent: quay.io/airshipit/octavia:2025.1-ubuntu_noble_loci\n    octavia_worker: quay.io/airshipit/octavia:2025.1-ubuntu_noble_loci\n    octavia_worker_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    octavia_housekeeping: quay.io/airshipit/octavia:2025.1-ubuntu_noble_loci\n    octavia_health_manager: quay.io/airshipit/octavia:2025.1-ubuntu_noble_loci\n    octavia_health_manager_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    openvswitch_vswitchd: quay.io/airshipit/openvswitch:latest-ubuntu_noble\nconf:\n  octavia_api_uwsgi:\n    uwsgi:\n      # in 2025.2 the wsgi script was removed\n      wsgi-file: /var/lib/openstack/bin/octavia-wsgi\n...\n"
  },
  {
    "path": "values_overrides/octavia/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    octavia_db_sync: quay.io/airshipit/octavia:2025.2-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble\n    image_repo_sync: docker.io/docker:17.07.0\n    octavia_api: quay.io/airshipit/octavia:2025.2-ubuntu_noble_loci\n    octavia_driver_agent: quay.io/airshipit/octavia:2025.2-ubuntu_noble_loci\n    octavia_worker: quay.io/airshipit/octavia:2025.2-ubuntu_noble_loci\n    octavia_worker_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    octavia_housekeeping: quay.io/airshipit/octavia:2025.2-ubuntu_noble_loci\n    octavia_health_manager: quay.io/airshipit/octavia:2025.2-ubuntu_noble_loci\n    octavia_health_manager_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    openvswitch_vswitchd: quay.io/airshipit/openvswitch:latest-ubuntu_noble\nconf:\n  octavia_api_uwsgi:\n    uwsgi:\n      # in 2025.2 the wsgi script was removed\n      wsgi-file: /var/lib/openstack/bin/octavia-wsgi\n...\n"
  },
  {
    "path": "values_overrides/octavia/mariadb-operator.yaml",
    "content": "---\nconf:\n  octavia:\n    database:\n      connection: null\n    task_flow:\n      persistence_connection: null\n\nmanifests:\n  job_db_init: false\n  secret_db_persistence: false\n\netcSources:\n  octavia_api:\n    - octavia-db-conn\n  octavia_db_sync:\n    - octavia-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: octavia\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: octavia\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: octavia-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: octavia-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"octavia\"\n      table: \"*\"\n      username: octavia\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: octavia-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: octavia\n      passwordSecretKeyRef:\n        name: octavia-db-password\n        key: password\n      database: octavia\n      secretName: octavia-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password 
}}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/openvswitch/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    ovs:\n      container:\n        vswitchd:\n          appArmorProfile:\n            type: RuntimeDefault\n        server:\n          appArmorProfile:\n            type: RuntimeDefault\n        modules:\n          appArmorProfile:\n            type: RuntimeDefault\n        perms:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/openvswitch/dpdk-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    openvswitch_db_server: quay.io/airshipit/openvswitch:latest-ubuntu_jammy-dpdk\n    openvswitch_vswitchd: quay.io/airshipit/openvswitch:latest-ubuntu_jammy-dpdk\npod:\n  resources:\n    enabled: true\n    ovs:\n      vswitchd:\n        requests:\n          memory: \"2Gi\"\n          cpu: \"2\"\n        limits:\n          memory: \"2Gi\"\n          cpu: \"2\"\n          hugepages-2Mi: \"1Gi\"\nconf:\n  ovs_dpdk:\n    enabled: true\n    hugepages_mountpath: /dev/hugepages\n    vhostuser_socket_dir: vhostuser\n    socket_memory: 512\n    lcore_mask: 0x1\n    pmd_cpu_mask: 0x4\n...\n"
  },
  {
    "path": "values_overrides/openvswitch/dpdk-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    openvswitch_db_server: quay.io/airshipit/openvswitch:latest-ubuntu_noble-dpdk\n    openvswitch_vswitchd: quay.io/airshipit/openvswitch:latest-ubuntu_noble-dpdk\npod:\n  resources:\n    enabled: true\n    ovs:\n      vswitchd:\n        requests:\n          memory: \"2Gi\"\n          cpu: \"2\"\n        limits:\n          memory: \"2Gi\"\n          cpu: \"2\"\n          hugepages-2Mi: \"1Gi\"\nconf:\n  ovs_dpdk:\n    enabled: true\n    hugepages_mountpath: /dev/hugepages\n    vhostuser_socket_dir: vhostuser\n    socket_memory: 512\n    lcore_mask: 0x1\n    pmd_cpu_mask: 0x4\n...\n"
  },
  {
    "path": "values_overrides/openvswitch/exporter.yaml",
    "content": "---\nopenvswitch:\n  extraContainers:\n    - name: ovs-exporter\n      image: ghcr.io/saeed-mcu/ovs_exporter/openvswitch_exporter:v2.3.2\n      imagePullPolicy: IfNotPresent\n      ports:\n        - name: metrics\n          containerPort: 9475\n          protocol: TCP\n      livenessProbe:\n        httpGet:\n          path: /metrics\n          port: 9475\n        initialDelaySeconds: 30\n        periodSeconds: 60\n        timeoutSeconds: 5\n      readinessProbe:\n        httpGet:\n          path: /metrics\n          port: 9475\n        initialDelaySeconds: 15\n        periodSeconds: 30\n        timeoutSeconds: 5\n      securityContext:\n        readOnlyRootFilesystem: true\n      resources:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"256Mi\"\n          cpu: \"500m\"\n      volumeMounts:\n        - name: run\n          mountPath: /run\n          readOnly: true\n...\n"
  },
  {
    "path": "values_overrides/openvswitch/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\n...\n"
  },
  {
    "path": "values_overrides/openvswitch/ovn.yaml",
    "content": "---\nconf:\n  openvswitch_db_server:\n    ptcp_port: 6640\n...\n"
  },
  {
    "path": "values_overrides/openvswitch/ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    openvswitch_db_server: quay.io/airshipit/openvswitch:latest-ubuntu_jammy\n    openvswitch_vswitchd: quay.io/airshipit/openvswitch:latest-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/openvswitch/ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    openvswitch_db_server: quay.io/airshipit/openvswitch:latest-ubuntu_noble\n    openvswitch_vswitchd: quay.io/airshipit/openvswitch:latest-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/openvswitch/vswitchd-probes.yaml",
    "content": "---\npod:\n  probes:\n    ovs_vswitch:\n      ovs_vswitch:\n        liveness:\n          exec:\n            - /bin/bash\n            - -c\n            - '/usr/bin/ovs-appctl bond/list; C1=$?; ovs-vsctl --column statistics list interface dpdk_b0s0 | grep -q -E \"rx_|tx_\"; C2=$?; ovs-vsctl --column statistics list interface dpdk_b0s1 | grep -q -E \"rx_|tx_\"; C3=$?; exit $(($C1+$C2+$C3))'\n...\n"
  },
  {
    "path": "values_overrides/ovn/ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    ovn_ovsdb_nb: quay.io/airshipit/ovn:ubuntu_jammy\n    ovn_ovsdb_sb: quay.io/airshipit/ovn:ubuntu_jammy\n    ovn_northd: quay.io/airshipit/ovn:ubuntu_jammy\n    ovn_controller: quay.io/airshipit/ovn:ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/ovn/ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    ovn_ovsdb_nb: quay.io/airshipit/ovn:ubuntu_noble\n    ovn_ovsdb_sb: quay.io/airshipit/ovn:ubuntu_noble\n    ovn_northd: quay.io/airshipit/ovn:ubuntu_noble\n    ovn_controller: quay.io/airshipit/ovn:ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/placement/2024.2-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    placement: \"quay.io/airshipit/placement:2024.2-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_service: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    db_init: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\"\n    placement_db_sync: \"quay.io/airshipit/placement:2024.2-ubuntu_jammy\"\n    dep_check: \"quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\"\n    image_repo_sync: \"docker.io/docker:17.07.0\"\ndependencies:\n  static:\n    db_sync:\n      jobs:\n        - placement-db-init\n...\n"
  },
  {
    "path": "values_overrides/placement/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    placement: \"quay.io/airshipit/placement:2025.1-ubuntu_jammy\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\"\n    placement_db_sync: \"quay.io/airshipit/placement:2025.1-ubuntu_jammy\"\n    dep_check: \"quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\"\n    image_repo_sync: \"docker.io/docker:17.07.0\"\ndependencies:\n  static:\n    db_sync:\n      jobs:\n        - placement-db-init\n...\n"
  },
  {
    "path": "values_overrides/placement/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    placement: \"quay.io/airshipit/placement:2025.1-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\"\n    placement_db_sync: \"quay.io/airshipit/placement:2025.1-ubuntu_noble\"\n    dep_check: \"quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble\"\n    image_repo_sync: \"docker.io/docker:17.07.0\"\ndependencies:\n  static:\n    db_sync:\n      jobs:\n        - placement-db-init\n...\n"
  },
  {
    "path": "values_overrides/placement/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    placement: \"quay.io/airshipit/placement:2025.2-ubuntu_noble\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\"\n    placement_db_sync: \"quay.io/airshipit/placement:2025.2-ubuntu_noble\"\n    dep_check: \"quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble\"\n    image_repo_sync: \"docker.io/docker:17.07.0\"\ndependencies:\n  static:\n    db_sync:\n      jobs:\n        - placement-db-init\n...\n"
  },
  {
    "path": "values_overrides/placement/annotations.yaml",
    "content": "---\nannotations:\n  pod:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    placement:\n      another.tld/foo: \"bar\"\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      placement:\n        custom.tld/key: \"value\"\n    tls:\n      placement_api_public:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/placement/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    placement:\n      container:\n        placement_api:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/placement/gateway.yaml",
    "content": "# Gateway API overrides for Placement.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  placement:\n    host_fqdn_override:\n      public:\n        host: placement.openstack-helm.org\n\nmanifests:\n  ingress: false\n  service_ingress: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: placement-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.placement.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: placement-api\n              port: 8778\n...\n"
  },
  {
    "path": "values_overrides/placement/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    placement: \"quay.io/airshipit/placement:2025.1-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\"\n    placement_db_sync: \"quay.io/airshipit/placement:2025.1-ubuntu_noble_loci\"\n    dep_check: \"quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\"\n    image_repo_sync: \"docker.io/docker:17.07.0\"\ndependencies:\n  static:\n    db_sync:\n      jobs:\n        - placement-db-init\n...\n"
  },
  {
    "path": "values_overrides/placement/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  pull_policy: IfNotPresent\n  tags:\n    placement: \"quay.io/airshipit/placement:2025.2-ubuntu_noble_loci\"\n    ks_user: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_service: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    ks_endpoints: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    db_init: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    db_drop: \"quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\"\n    placement_db_sync: \"quay.io/airshipit/placement:2025.2-ubuntu_noble_loci\"\n    dep_check: \"quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\"\n    image_repo_sync: \"docker.io/docker:17.07.0\"\ndependencies:\n  static:\n    db_sync:\n      jobs:\n        - placement-db-init\n...\n"
  },
  {
    "path": "values_overrides/placement/mariadb-operator.yaml",
    "content": "---\nconf:\n  placement:\n    placement_database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  placement_api:\n    - placement-db-conn\n  placement_db_sync:\n    - placement-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: placement\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: placement\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: placement-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: placement-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"placement\"\n      table: \"*\"\n      username: placement\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: placement-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: placement\n      passwordSecretKeyRef:\n        name: placement-db-password\n        key: password\n      database: placement\n      secretName: placement-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ 
.Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/placement/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\nnetwork_policy:\n  placement:\n    egress:\n      - {}\n    ingress:\n      - from:\n        - podSelector:\n            matchLabels:\n              application: nova\n        ports:\n        - protocol: TCP\n          port: 8778\n        - protocol: TCP\n          port: 80\n        - protocol: TCP\n          port: 8080\n...\n"
  },
  {
    "path": "values_overrides/placement/tls-offloading.yaml",
    "content": "---\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      placement:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n\ntls:\n  identity: true\n...\n"
  },
  {
    "path": "values_overrides/placement/tls.yaml",
    "content": "---\nnetwork:\n  api:\n    ingress:\n      annotations:\n        nginx.ingress.kubernetes.io/backend-protocol: \"https\"\nconf:\n  software:\n    apache2:\n      binary: apache2\n      start_parameters: -DFOREGROUND\n      site_dir: /etc/apache2/sites-enabled\n      conf_dir: /etc/apache2/conf-enabled\n      mods_dir: /etc/apache2/mods-available\n      a2enmod:\n        - ssl\n      a2dismod: null\n  placement:\n    keystone_authtoken:\n      cafile: /etc/placement/certs/ca.crt\n  wsgi_placement: |\n    {{- $portInt := tuple \"placement\" \"service\" \"api\" $ | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    Listen {{ $portInt }}\n    <Directory /var/lib/openstack/bin>\n        Require all granted\n    </Directory>\n    <VirtualHost *:{{ $portInt }}>\n      ServerName {{ printf \"%s.%s.svc.%s\" \"placement-api\" .Release.Namespace .Values.endpoints.cluster_domain_suffix }}\n      WSGIDaemonProcess placement-api processes=1 threads=1 user=placement group=placement display-name=%{GROUP}\n      WSGIProcessGroup placement-api\n      WSGIScriptAlias / /var/lib/openstack/bin/placement-api\n      WSGIApplicationGroup %{GLOBAL}\n      WSGIPassAuthorization On\n      ErrorLogFormat \"%{cu}t %M\"\n      SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n      ErrorLog /dev/stdout\n      CustomLog /dev/stdout combined env=!forwarded\n      CustomLog /dev/stdout proxy env=forwarded\n      SSLEngine on\n      SSLCertificateFile      /etc/placement/certs/tls.crt\n      SSLCertificateKeyFile   /etc/placement/certs/tls.key\n      SSLProtocol             all -SSLv3 -TLSv1 -TLSv1.1\n      SSLCipherSuite          ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256\n      SSLHonorCipherOrder     on\n    
</VirtualHost>\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      placement:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n    scheme:\n      default: https\n    port:\n      api:\n        default: 443\n  placement:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: placement-tls-api\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: https\n      service: https\n    port:\n      api:\n        public: 443\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/postgresql/2024.2-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    ks_user: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/postgresql/2025.1-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/postgresql/2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/postgresql/2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/postgresql/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    server:\n      container:\n        postgresql:\n          appArmorProfile:\n            type: RuntimeDefault\n        set_volume_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n    prometheus_postgresql_exporter:\n      container:\n        postgresql_exporter:\n          appArmorProfile:\n            type: RuntimeDefault\n    create_user:\n      container:\n        prometheus_postgresql_exporter_create_user:\n          appArmorProfile:\n            type: RuntimeDefault\n    postgresql_backup:\n      container:\n        postgresql_backup:\n          appArmorProfile:\n            type: RuntimeDefault\n        backup_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/postgresql/backups.yaml",
    "content": "---\nconf:\n  backup:\n    enabled: true\n    remote_backup:\n      enabled: true\nvolume:\n  backup:\n    enabled: true\nmanifests:\n  pvc_backup: true\n  job_ks_user: false\n  cron_job_postgresql_backup: true\n  secret_backup_restore: true\nendpoints:\n  identity:\n    auth:\n      postgresql:\n        auth_url: null\n        role: admin\n        region_name: RegionOne\n        username: postgresql-backup-user\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      postgresql_failover:\n        auth_url: null\n        role: admin\n        region_name: RegionOne\n        username: postgresql-backup-user-failover\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n...\n"
  },
  {
    "path": "values_overrides/postgresql/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/postgresql/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/postgresql/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\nnetwork_policy:\n  postgresql:\n    egress:\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/32\n        ports:\n          - protocol: TCP\n            port: %%%REPLACE_API_PORT%%%\n...\n"
  },
  {
    "path": "values_overrides/postgresql/staggered-backups.yaml",
    "content": "---\nconf:\n  backup:\n    enabled: true\n    remote_backup:\n      enabled: false\npod:\n  labels:\n    backup:\n      staggered_backups: enabled\n  affinity:\n    postgresql_backup:\n      podAntiAffinity:\n        requiredDuringSchedulingIgnoredDuringExecution:\n          - labelSelector:\n              matchExpressions:\n                - key: status.phase\n                  operator: NotIn\n                  values:\n                    - Running\n                - key: staggered-backups\n                  operator: In\n                  values:\n                    - enabled\n            namespaces:\n              - openstack\n              - osh-infra\n              - ucp\n            topologyKey: kubernetes.io/os\nvolume:\n  backup:\n    enabled: true\nmanifests:\n  pvc_backup: true\n  job_ks_user: false\n  cron_job_postgresql_backup: true\n  secret_backup_restore: true\n...\n"
  },
  {
    "path": "values_overrides/postgresql/tls.yaml",
    "content": "---\nconf:\n  postgresql:\n    ssl: 'on'\npod:\n  security_context:\n    server:\n      container:\n        perms:\n          readOnlyRootFilesystem: false\n        postgresql:\n          runAsUser: 0\n          allowPrivilegeEscalation: true\n          readOnlyRootFilesystem: false\nendpoints:\n  postgresql:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: postgresql-tls-direct\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/powerdns/2024.2-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/powerdns/2025.1-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/powerdns/2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/powerdns/2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/powerdns/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/powerdns/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/prometheus/2024.2-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    helm_tests: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/prometheus/2025.1-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/prometheus/2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/prometheus/2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/prometheus/alertmanager.yaml",
    "content": "---\nconf:\n  prometheus:\n    rules:\n      alertmanager:\n        groups:\n        - name: alertmanager.rules\n          rules:\n          - alert: AlertmanagerConfigInconsistent\n            expr: count_values(\"config_hash\", alertmanager_config_hash) BY (service) / ON(service) GROUP_LEFT() label_replace(prometheus_operator_alertmanager_spec_replicas, \"service\", \"alertmanager-$1\", \"alertmanager\", \"(.*)\") != 1\n            for: 5m\n            labels:\n              severity: critical\n            annotations:\n              description: The configuration of the instances of the Alertmanager cluster `{{$labels.service}}` are out of sync.\n              summary: Alertmanager configurations are inconsistent\n          - alert: AlertmanagerDownOrMissing\n            expr: label_replace(prometheus_operator_alertmanager_spec_replicas, \"job\", \"alertmanager-$1\", \"alertmanager\", \"(.*)\") / ON(job) GROUP_RIGHT() sum(up) BY (job) != 1\n            for: 5m\n            labels:\n              severity: warning\n            annotations:\n              description: An unexpected number of Alertmanagers are scraped or Alertmanagers disappeared from discovery.\n              summary: Alertmanager down or not discovered\n          - alert: FailedReload\n            expr: alertmanager_config_last_reload_successful == 0\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: Reloading Alertmanager's configuration has failed for {{ $labels.namespace }}/{{ $labels.pod }}.\n              summary: Alertmanager configuration reload has failed\n...\n"
  },
  {
    "path": "values_overrides/prometheus/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    api:\n      container:\n        prometheus:\n          appArmorProfile:\n            type: RuntimeDefault\n        prometheus_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n        apache_proxy:\n          appArmorProfile:\n            type: RuntimeDefault\n    test:\n      container:\n        prometheus_helm_tests:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/prometheus/ceph.yaml",
    "content": "---\nconf:\n  prometheus:\n    rules:\n      ceph:\n        groups:\n        - name: ceph.recording_rules\n          rules:\n          - record: ceph_cluster_usage_percent\n            expr: 100 * (ceph_cluster_total_used_bytes / ceph_cluster_total_bytes)\n          - record: ceph_placement_group_degrade_percent\n            expr: 100 * (ceph_pg_degraded / ceph_pg_total)\n          - record: ceph_osd_down_percent\n            expr: 100 * (count(ceph_osd_up == 0) / count(ceph_osd_metadata))\n          - record: ceph_osd_out_percent\n            expr: 100 * (count(ceph_osd_in == 0) / count(ceph_osd_metadata))\n        - name: ceph.alerting_rules\n          rules:\n          - alert: prom_exporter_ceph_unavailable\n            expr: absent(ceph_health_status)\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: Ceph exporter is not collecting metrics or is not available for past 10 minutes\n              title: Ceph exporter is not collecting metrics or is not available\n          - alert: no_active_ceph_mgr\n            expr: avg_over_time(up{job=\"ceph-mgr\"}[5m]) == 0\n            labels:\n              severity: warning\n            annotations:\n              description: 'no ceph active mgr is present or all ceph mgr are down'\n              summary: 'no ceph active mgt is present'\n          - alert: ceph_monitor_quorum_low\n            expr: ceph_mon_quorum_count < 3\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'ceph monitor quorum has been less than 3 for more than 5 minutes'\n              summary: 'ceph high availability is at risk'\n          - alert: ceph_monitor_quorum_absent\n            expr: absent(avg_over_time(ceph_mon_quorum_status[5m]))\n            labels:\n              severity: page\n            annotations:\n              description: 'ceph monitor quorum has been gone 
for more than 5 minutes'\n              summary: 'ceph high availability is at risk'\n          - alert: ceph_cluster_usage_high\n            expr: avg_over_time(ceph_cluster_usage_percent[5m]) > 80\n            labels:\n              severity: page\n            annotations:\n              description: 'ceph cluster capacity usage more than 80 percent'\n              summary: 'ceph cluster usage is more than 80 percent'\n          - alert: ceph_placement_group_degrade_pct_high\n            expr: avg_over_time(ceph_placement_group_degrade_percent[5m]) > 80\n            labels:\n              severity: critical\n            annotations:\n              description: 'ceph placement group degradation is more than 80 percent'\n              summary: 'ceph placement groups degraded'\n          - alert: ceph_osd_down_pct_high\n            expr: avg_over_time(ceph_osd_down_percent[5m]) > 80\n            labels:\n              severity: critical\n            annotations:\n              description: 'ceph OSDs down percent is more than 80 percent'\n              summary: 'ceph OSDs down percent is high'\n          - alert: ceph_osd_down\n            expr: avg_over_time(ceph_osd_up[5m]) == 0\n            labels:\n              severity: critical\n            annotations:\n              description: 'ceph OSD {{ $labels.ceph_daemon }} is down in instance {{ $labels.instance }}.'\n              summary: 'ceph OSD {{ $labels.ceph_daemon }} is down in instance {{ $labels.instance }}.'\n          - alert: ceph_osd_out\n            expr: avg_over_time(ceph_osd_in[5m]) == 0\n            labels:\n              severity: page\n            annotations:\n              description: 'ceph OSD {{ $labels.ceph_daemon }} is out in instance {{ $labels.instance }}.'\n              summary: 'ceph OSD {{ $labels.ceph_daemon }} is out in instance {{ $labels.instance }}.'\n...\n"
  },
  {
    "path": "values_overrides/prometheus/elasticsearch.yaml",
    "content": "---\nconf:\n  prometheus:\n    rules:\n      elasticsearch:\n        groups:\n        - name: elasticsearch.alerting_rules\n          rules:\n          - alert: prom_exporter_elasticsearch_unavailable\n            expr: avg_over_time(up{job=\"elasticsearch-exporter\"}[5m]) == 0\n            for: 5m\n            labels:\n              severity: warning\n            annotations:\n              description: Elasticsearch exporter is not collecting metrics or is not available for past 10 minutes\n              title: Elasticsearch exporter is not collecting metrics or is not available\n          - alert: es_high_process_open_files_count\n            expr: sum(elasticsearch_process_open_files_count) by (host) > 64000\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'Elasticsearch at {{ $labels.host }} has more than 64000 process open file count.'\n              summary: 'Elasticsearch has a very high process open file count.'\n          - alert: es_high_process_cpu_percent\n            expr: elasticsearch_process_cpu_percent > 95\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'Elasticsearch at {{ $labels.instance }} has high process cpu percent of {{ $value }}.'\n              summary: 'Elasticsearch process cpu usage is more than 95 percent.'\n          - alert: es_fs_usage_high\n            expr: (100 * (elasticsearch_filesystem_data_size_bytes - elasticsearch_filesystem_data_free_bytes) / elasticsearch_filesystem_data_size_bytes) > 80\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'Elasticsearch at {{ $labels.instance }} has filesystem usage of {{ $value }}.'\n              summary: 'Elasticsearch filesystem usage is high.'\n          - alert: es_unassigned_shards\n            expr: 
elasticsearch_cluster_health_unassigned_shards > 0\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'Elasticsearch has {{ $value }} unassigned shards.'\n              summary: 'Elasticsearch has unassigned shards and hence an unhealthy cluster state.'\n          - alert: es_cluster_health_timed_out\n            expr: elasticsearch_cluster_health_timed_out > 0\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'Elasticsearch cluster health status call timed out {{ $value }} times.'\n              summary: 'Elasticsearch cluster health status calls are timing out.'\n          - alert: es_cluster_health_status_alert\n            expr: (sum(elasticsearch_cluster_health_status{color=\"green\"})*2)+sum(elasticsearch_cluster_health_status{color=\"yellow\"}) < 2\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'Elasticsearch cluster health status is {{ $value }}, not 2 (green). 
One or more shards or replicas are unallocated.'\n              summary: 'Elasticsearch cluster health status is not green.'\n          - alert: es_cluster_health_too_few_nodes_running\n            expr: elasticsearch_cluster_health_number_of_nodes < 3\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'There are only {{$value}} < 3 ElasticSearch nodes running'\n              summary: 'ElasticSearch running on less than 3 nodes'\n          - alert: es_cluster_health_too_few_data_nodes_running\n            expr: elasticsearch_cluster_health_number_of_data_nodes < 3\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'There are only {{$value}} < 3 ElasticSearch data nodes running'\n              summary: 'ElasticSearch running on less than 3 data nodes'\n      fluentd:\n        groups:\n        - name: fluentd.alerting_rules\n          rules:\n          - alert: prom_exporter_fluentd_unavailable\n            expr: avg_over_time(up{job=\"fluentd-daemonset-exporter\"}[5m]) == 0\n            for: 5m\n            labels:\n              severity: warning\n            annotations:\n              description: Fluentd exporter is not collecting metrics or is not available for past 10 minutes\n              title: Fluentd exporter is not collecting metrics or is not available\n...\n"
  },
  {
    "path": "values_overrides/prometheus/gateway.yaml",
    "content": "# Gateway API overrides for Prometheus.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  monitoring:\n    host_fqdn_override:\n      public:\n        host: prometheus.openstack-helm.org\n\nmanifests:\n  ingress: false\n  service_ingress: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: prometheus-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.monitoring.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: prom-metrics\n              port: 9090\n...\n"
  },
  {
    "path": "values_overrides/prometheus/kubernetes.yaml",
    "content": "---\nconf:\n  prometheus:\n    rules:\n      kubernetes:\n        groups:\n        - name: calico.rules\n          rules:\n          - alert: prom_exporter_calico_unavailable\n            expr: avg_over_time(up{job=\"kubernetes-pods\",application=\"calico\"}[5m]) == 0\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: Calico exporter is not collecting metrics or is not available for past 10 minutes\n              title: Calico exporter is not collecting metrics or is not available\n          - alert: calico_datapane_failures_high_1h\n            expr: absent(felix_int_dataplane_failures) OR increase(felix_int_dataplane_failures[1h]) > 5\n            labels:\n              severity: page\n            annotations:\n              description: 'Felix instance {{ $labels.instance }} has seen {{ $value }} dataplane failures within the last hour'\n              summary: 'A high number of dataplane failures within Felix are happening'\n          - alert: calico_datapane_address_msg_batch_size_high_5m\n            expr: absent(felix_int_dataplane_addr_msg_batch_size_sum) OR absent(felix_int_dataplane_addr_msg_batch_size_count) OR (felix_int_dataplane_addr_msg_batch_size_sum/felix_int_dataplane_addr_msg_batch_size_count) > 5\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'Felix instance {{ $labels.instance }} has seen a high value of {{ $value }} dataplane address message batch size'\n              summary: 'Felix address message batch size is higher'\n          - alert: calico_datapane_iface_msg_batch_size_high_5m\n            expr: absent(felix_int_dataplane_iface_msg_batch_size_sum) OR absent(felix_int_dataplane_iface_msg_batch_size_count) OR (felix_int_dataplane_iface_msg_batch_size_sum/felix_int_dataplane_iface_msg_batch_size_count) > 5\n            for: 5m\n            labels:\n              
severity: page\n            annotations:\n              description: 'Felix instance {{ $labels.instance }} has seen a high value of {{ $value }} dataplane interface message batch size'\n              summary: 'Felix interface message batch size is higher'\n          - alert: calico_ipset_errors_high_1h\n            expr: absent(felix_ipset_errors) OR increase(felix_ipset_errors[1h]) > 5\n            labels:\n              severity: page\n            annotations:\n              description: 'Felix instance {{ $labels.instance }} has seen {{ $value }} ipset errors within the last hour'\n              summary: 'A high number of ipset errors within Felix are happening'\n          - alert: calico_iptable_save_errors_high_1h\n            expr: absent(felix_iptables_save_errors) OR increase(felix_iptables_save_errors[1h]) > 5\n            labels:\n              severity: page\n            annotations:\n              description: 'Felix instance {{ $labels.instance }} has seen {{ $value }} iptable save errors within the last hour'\n              summary: 'A high number of iptable save errors within Felix are happening'\n          - alert: calico_iptable_restore_errors_high_1h\n            expr: absent(felix_iptables_restore_errors) OR increase(felix_iptables_restore_errors[1h]) > 5\n            labels:\n              severity: page\n            annotations:\n              description: 'Felix instance {{ $labels.instance }} has seen {{ $value }} iptable restore errors within the last hour'\n              summary: 'A high number of iptable restore errors within Felix are happening'\n        - name: etcd3.rules\n          rules:\n          - alert: etcd_InsufficientMembers\n            expr: count(up{job=\"etcd\"} == 0) > (count(up{job=\"etcd\"}) / 2 - 1)\n            for: 3m\n            labels:\n              severity: critical\n            annotations:\n              description: If one more etcd member goes down the cluster will be unavailable\n              summary: 
etcd cluster insufficient members\n          - alert: etcd_NoLeader\n            expr: etcd_server_has_leader{job=\"etcd\"} == 0\n            for: 1m\n            labels:\n              severity: critical\n            annotations:\n              description: etcd member {{ $labels.instance }} has no leader\n              summary: etcd member has no leader\n          - alert: etcd_HighNumberOfLeaderChanges\n            expr: increase(etcd_server_leader_changes_seen_total{job=\"etcd\"}[1h]) > 3\n            labels:\n              severity: warning\n            annotations:\n              description: etcd instance {{ $labels.instance }} has seen {{ $value }} leader changes within the last hour\n              summary: a high number of leader changes within the etcd cluster are happening\n          - alert: etcd_HighNumberOfFailedGRPCRequests\n            expr: sum(rate(etcd_grpc_requests_failed_total{job=\"etcd\"}[5m])) BY (grpc_method) / sum(rate(etcd_grpc_total{job=\"etcd\"}[5m])) BY (grpc_method) > 0.01\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: '{{ $value }}% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}'\n              summary: a high number of gRPC requests are failing\n          - alert: etcd_HighNumberOfFailedGRPCRequests\n            expr: sum(rate(etcd_grpc_requests_failed_total{job=\"etcd\"}[5m])) BY (grpc_method) / sum(rate(etcd_grpc_total{job=\"etcd\"}[5m])) BY (grpc_method) > 0.05\n            for: 5m\n            labels:\n              severity: critical\n            annotations:\n              description: '{{ $value }}% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}'\n              summary: a high number of gRPC requests are failing\n          - alert: etcd_GRPCRequestsSlow\n            expr: histogram_quantile(0.99, rate(etcd_grpc_unary_requests_duration_seconds_bucket[5m])) > 
0.15\n            for: 10m\n            labels:\n              severity: critical\n            annotations:\n              description: on etcd instance {{ $labels.instance }} gRPC requests to {{ $labels.grpc_method }} are slow\n              summary: slow gRPC requests\n          - alert: etcd_HighNumberOfFailedHTTPRequests\n            expr: sum(rate(etcd_http_failed_total{job=\"etcd\"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job=\"etcd\"}[5m])) BY (method) > 0.01\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: '{{ $value }}% of requests for {{ $labels.method }} failed on etcd instance {{ $labels.instance }}'\n              summary: a high number of HTTP requests are failing\n          - alert: etcd_HighNumberOfFailedHTTPRequests\n            expr: sum(rate(etcd_http_failed_total{job=\"etcd\"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job=\"etcd\"}[5m])) BY (method) > 0.05\n            for: 5m\n            labels:\n              severity: critical\n            annotations:\n              description: '{{ $value }}% of requests for {{ $labels.method }} failed on etcd instance {{ $labels.instance }}'\n              summary: a high number of HTTP requests are failing\n          - alert: etcd_HTTPRequestsSlow\n            expr: histogram_quantile(0.99, rate(etcd_http_successful_duration_seconds_bucket[5m])) > 0.15\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: on etcd instance {{ $labels.instance }} HTTP requests to {{ $labels.method }} are slow\n              summary: slow HTTP requests\n          - alert: etcd_EtcdMemberCommunicationSlow\n            expr: histogram_quantile(0.99, rate(etcd_network_member_round_trip_time_seconds_bucket[5m])) > 0.15\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 
etcd instance {{ $labels.instance }} member communication with {{ $labels.To }} is slow\n              summary: etcd member communication is slow\n          - alert: etcd_HighNumberOfFailedProposals\n            expr: increase(etcd_server_proposals_failed_total{job=\"etcd\"}[1h]) > 5\n            labels:\n              severity: warning\n            annotations:\n              description: etcd instance {{ $labels.instance }} has seen {{ $value }} proposal failures within the last hour\n              summary: a high number of proposals within the etcd cluster are failing\n          - alert: etcd_HighFsyncDurations\n            expr: histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) > 0.5\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: etcd instance {{ $labels.instance }} fync durations are high\n              summary: high fsync durations\n          - alert: etcd_HighCommitDurations\n            expr: histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) > 0.25\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: etcd instance {{ $labels.instance }} commit durations are high\n              summary: high commit durations\n        - name: kubelet.rules\n          rules:\n          - alert: K8SNodeNotReady\n            expr: kube_node_status_condition{condition=\"Ready\", status=\"unknown\"} == 1 or kube_node_status_condition{condition=\"Ready\", status=\"false\"} == 1\n            for: 1m\n            labels:\n              severity: critical\n            annotations:\n              description: The Kubelet on {{ $labels.node }} has not checked in with the API, or has set itself to NotReady, for more than a minute\n              summary: '{{ $labels.node }} Node status is NotReady and {{ $labels.status }}'\n          - alert: K8SManyNodesNotReady\n    
        expr: count(kube_node_status_condition{condition=\"Ready\", status=\"unknown\"} == 1) > 1 and (count(kube_node_status_condition{condition=\"Ready\", status=\"unknown\"} == 1) / count(kube_node_status_condition{condition=\"Ready\", status=\"unknown\"})) > 0.2\n            for: 1m\n            labels:\n              severity: critical\n            annotations:\n              description: '{{ $value }} Kubernetes nodes (more than 10% are in the NotReady state).'\n              summary: Many Kubernetes nodes are Not Ready\n          - alert: K8SManyNodesNotReady\n            expr: count(kube_node_status_condition{condition=\"Ready\", status=\"false\"} == 1) > 1 and (count(kube_node_status_condition{condition=\"Ready\", status=\"false\"} == 1) / count(kube_node_status_condition{condition=\"Ready\", status=\"false\"})) > 0.2\n            for: 1m\n            labels:\n              severity: critical\n            annotations:\n              description: '{{ $value }} Kubernetes nodes (more than 10% are in the NotReady state).'\n              summary: Many Kubernetes nodes are Not Ready\n          - alert: K8SNodesNotReady\n            expr: count(kube_node_status_condition{condition=\"Ready\", status=\"false\"} == 1) > 0 or count(kube_node_status_condition{condition=\"Ready\", status=\"unknown\"} == 1) > 0\n            for: 1m\n            labels:\n              severity: critical\n            annotations:\n              description: '{{ $value }} nodes are notReady state.'\n              summary: One or more Kubernetes nodes are Not Ready\n          - alert: K8SKubeletDown\n            expr: count(up{job=\"kubelet\"} == 0) / count(up{job=\"kubelet\"}) > 0.03\n            for: 1m\n            labels:\n              severity: critical\n            annotations:\n              description: Prometheus failed to scrape {{ $value }}% of kubelets.\n              summary: Many Kubelets cannot be scraped\n          - alert: K8SKubeletDown\n            expr: 
absent(up{job=\"kubelet\"} == 1) or count(up{job=\"kubelet\"} == 0) / count(up{job=\"kubelet\"}) > 0.1\n            for: 1m\n            labels:\n              severity: critical\n            annotations:\n              description: Prometheus failed to scrape {{ $value }}% of kubelets, or all Kubelets have disappeared from service discovery.\n              summary: Many Kubelets cannot be scraped\n          - alert: K8SKubeletTooManyPods\n            expr: kubelet_running_pod_count > 100\n            labels:\n              severity: warning\n            annotations:\n              description: Kubelet {{$labels.instance}} is running {{$value}} pods, close to the limit of 110\n              summary: Kubelet is close to pod limit\n        - name: kube-apiserver.rules\n          rules:\n          - alert: K8SApiserverDown\n            expr: absent(up{job=\"apiserver\"} == 1)\n            for: 5m\n            labels:\n              severity: critical\n            annotations:\n              description: Prometheus failed to scrape API server(s), or all API servers have disappeared from service discovery.\n              summary: API server unreachable\n          - alert: K8SApiServerLatency\n            expr: histogram_quantile(0.99, sum(apiserver_request_latencies_bucket{verb!~\"CONNECT|WATCHLIST|WATCH|PROXY\"}) WITHOUT (instance, resource)) / 1e+06 > 1\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 99th percentile Latency for {{ $labels.verb }} requests to the kube-apiserver is higher than 1s.\n              summary: Kubernetes apiserver latency is high\n        - name: kube-controller-manager.rules\n          rules:\n          - alert: K8SControllerManagerDown\n            expr: absent(up{job=\"kube-controller-manager-discovery\"} == 1)\n            for: 5m\n            labels:\n              severity: critical\n            annotations:\n              description: There is no running 
K8S controller manager. Deployments and replication controllers are not making progress.\n              runbook: https://coreos.com/tectonic/docs/latest/troubleshooting/controller-recovery.html#recovering-a-controller-manager\n              summary: Controller manager is down\n        - name: kubernetes-object.rules\n          rules:\n          - alert: prom_exporter_kube_state_metrics_unavailable\n            expr: avg_over_time(up{job=\"kube-state-metrics\"}[5m]) == 0\n            for: 5m\n            labels:\n              severity: warning\n            annotations:\n              description: kube-state-metrics exporter is not collecting metrics or is not available for past 10 minutes\n              title: kube-state-metrics exporter is not collecting metrics or is not available\n          - alert: kube_statefulset_replicas_unavailable\n            expr: kube_statefulset_status_replicas < kube_statefulset_replicas\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'statefulset {{$labels.statefulset}} has {{$value}} replicas, which is less than desired'\n              summary: '{{$labels.statefulset}}: has insufficient replicas.'\n          - alert: daemonsets_misscheduled\n            expr: kube_daemonset_status_number_misscheduled > 0\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'Daemonset {{$labels.daemonset}} is running where it is not supposed to run'\n              summary: 'Daemonsets not scheduled correctly'\n          - alert: daemonsets_not_scheduled\n            expr: kube_daemonset_status_desired_number_scheduled - kube_daemonset_status_current_number_scheduled > 0\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: '{{ $value }} of Daemonset {{$labels.daemonset}} scheduled which is less than desired number'\n        
      summary: 'Less than desired number of daemonsets scheduled'\n          - alert: daemonset_pods_unavailable\n            expr: kube_daemonset_status_number_unavailable > 0\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'Daemonset {{$labels.daemonset}} currently has pods unavailable'\n              summary: 'Daemonset pods unavailable, due to one of many reasons'\n          - alert: deployment_replicas_unavailable\n            expr: kube_deployment_status_replicas_unavailable > 0\n            for: 10m\n            labels:\n              severity: page\n            annotations:\n              description: 'deployment {{$labels.deployment}} has {{$value}} replicas unavailable'\n              summary: '{{$labels.deployment}}: has insufficient replicas.'\n          - alert: rollingupdate_deployment_replica_less_than_spec_max_unavailable\n            expr: kube_deployment_status_replicas_available - kube_deployment_spec_strategy_rollingupdate_max_unavailable < 0\n            for: 10m\n            labels:\n              severity: page\n            annotations:\n              description: 'deployment {{$labels.deployment}} has {{$value}} replicas available which is less than specified as max unavailable during a rolling update'\n              summary: '{{$labels.deployment}}: has insufficient replicas during a rolling update.'\n          - alert: job_status_failed\n            expr: kube_job_status_failed > 0\n            for: 10m\n            labels:\n              severity: page\n            annotations:\n              description: 'Job {{$labels.exported_job}} is in failed status'\n              summary: '{{$labels.exported_job}} has failed status'\n          - alert: pod_status_pending\n            expr: kube_pod_status_phase{phase=\"Pending\"} == 1\n            for: 10m\n            labels:\n              severity: page\n            annotations:\n              description: 'Pod 
{{$labels.pod}} in namespace {{$labels.namespace}} has been in pending status for more than 10 minutes'\n              summary: 'Pod {{$labels.pod}} in namespace {{$labels.namespace}} in pending status'\n          - alert: pod_status_error_image_pull\n            expr: kube_pod_container_status_waiting_reason {reason=\"ErrImagePull\"} == 1\n            for: 10m\n            labels:\n              severity: page\n            annotations:\n              description: 'Pod {{$labels.pod}} in namespace {{$labels.namespace}} has an Image pull error for more than 10 minutes'\n              summary: 'Pod {{$labels.pod}} in namespace {{$labels.namespace}} in error status'\n          - alert: pod_status_error_image_pull_backoff\n            expr: kube_pod_container_status_waiting_reason {reason=\"ImagePullBackOff\"} == 1\n            for: 10m\n            labels:\n              severity: page\n            annotations:\n              description: 'Pod {{$labels.pod}} in namespace {{$labels.namespace}} has an ImagePullBackOff error for more than 10 minutes'\n              summary: 'Pod {{$labels.pod}} in namespace {{$labels.namespace}} in error status'\n          - alert: pod_error_crash_loop_back_off\n            expr: kube_pod_container_status_waiting_reason {reason=\"CrashLoopBackOff\"} == 1\n            for: 10m\n            labels:\n              severity: page\n            annotations:\n              description: 'Pod {{$labels.pod}} in namespace {{$labels.namespace}} has an CrashLoopBackOff  error for more than 10 minutes'\n              summary: 'Pod {{$labels.pod}} in namespace {{$labels.namespace}} in error status'\n          - alert: pod_error_config_error\n            expr: kube_pod_container_status_waiting_reason {reason=\"CreateContainerConfigError\"} == 1\n            for: 10m\n            labels:\n              severity: page\n            annotations:\n              description: 'Pod {{$labels.pod}} in namespace {{$labels.namespace}} has a 
CreateContainerConfigError error for more than 10 minutes'\n              summary: 'Pod {{$labels.pod}} in namespace {{$labels.namespace}} in error status'\n          - alert: replicaset_missing_replicas\n            expr: kube_replicaset_spec_replicas -  kube_replicaset_status_ready_replicas > 0\n            for: 10m\n            labels:\n              severity: page\n            annotations:\n              description: 'Replicaset {{$labels.replicaset}} is missing desired number of replicas for more than 10 minutes'\n              summary: 'Replicaset {{$labels.replicaset}} is missing replicas'\n          - alert: pod_container_terminated\n            expr: kube_pod_container_status_terminated_reason{reason=~\"OOMKilled|Error|ContainerCannotRun\"} > 0\n            for: 10m\n            labels:\n              severity: page\n            annotations:\n              description: 'Pod {{$labels.pod}} in namespace {{$labels.namespace}} has a container terminated for more than 10 minutes'\n              summary: 'Pod {{$labels.pod}} in namespace {{$labels.namespace}} in error status'\n          - alert: volume_claim_capacity_high_utilization\n            expr: 100 * kubelet_volume_stats_used_bytes / kubelet_volume_stats_capacity_bytes > 80\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'volume claim {{$labels.persistentvolumeclaim}} usage has exceeded 80% of total capacity'\n              summary: '{{$labels.persistentvolumeclaim}} usage has exceeded 80% of total capacity.'\n...\n"
  },
  {
    "path": "values_overrides/prometheus/local-storage.yaml",
    "content": "---\npod:\n  replicas:\n    prometheus: 1\nstorage:\n  requests:\n    storage: 1Gi\n  storage_class: local-storage\n...\n"
  },
  {
    "path": "values_overrides/prometheus/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/prometheus/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/prometheus/nodes.yaml",
    "content": "---\nconf:\n  prometheus:\n    rules:\n      nodes:\n        groups:\n        - name: node.recording_rules\n          rules:\n          - record: node_filesystem_free_percent\n            expr: 100 * node_filesystem_free{fstype =~ \"xfs|ext[34]\"} / node_filesystem_size{fstype =~ \"xfs|ext[34]\"}\n          - record: node_ram_usage_percent\n            expr: 100 * (node_memory_MemFree + node_memory_Buffers + node_memory_Cached) / node_memory_MemTotal\n          - record: node_swap_usage_percent\n            expr: 100 * (node_memory_SwapFree + node_memory_SwapCached) / node_memory_SwapTotal\n        - name: nodes.alerting_rules\n          rules:\n          - alert: prom_exporter_node_unavailable\n            expr: avg_over_time(up{job=\"node-exporter\"}[5m]) == 0\n            for: 5m\n            labels:\n              severity: warning\n            annotations:\n              description: node exporter is not collecting metrics or is not available for past 10 minutes\n              title: node exporter is not collecting metrics or is not available\n          - alert: node_filesystem_full_80percent\n            expr: avg_over_time(node_filesystem_free_percent[2m]) > 80\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}}\n                has less than 20% free space left.'\n              summary: '{{$labels.alias}}: Filesystem is running out of space soon.'\n          - alert: node_filesystem_full_in_4h\n            expr: predict_linear(node_filesystem_free{fstype =~ \"xfs|ext[34]\"}[1h], 4 * 3600) <= 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}}\n                is running out of space in approx. 
4 hours'\n              summary: '{{$labels.alias}}: Filesystem is running out of space in 4 hours.'\n          - alert: node_filedescriptors_full_in_3h\n            expr: predict_linear(node_filefd_allocated[1h], 3 * 3600) >= node_filefd_maximum\n            for: 20m\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.alias}} is running out of available file descriptors\n                in approx. 3 hours'\n              summary: '{{$labels.alias}} is running out of available file descriptors in\n                3 hours.'\n          - alert: node_load1_90percent\n            expr: node_load1 / ON(alias) count(node_cpu{mode=\"system\"}) BY (alias) >= 0.9\n            for: 1h\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.alias}} is running with > 90% total load for at least\n                1h.'\n              summary: '{{$labels.alias}}: Running on high load.'\n          - alert: node_cpu_util_90percent\n            expr: 100 - (avg(irate(node_cpu{mode=\"idle\"}[5m])) BY (alias) * 100) >= 90\n            for: 1h\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.alias}} has total CPU utilization over 90% for at least\n                1h.'\n              summary: '{{$labels.alias}}: High CPU utilization.'\n          - alert: node_ram_using_90percent\n            expr: avg_over_time(node_ram_usage_percent[2m]) > 90\n            for: 30m\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.alias}} is using at least 90% of its RAM for at least\n                30 minutes now.'\n              summary: '{{$labels.alias}}: Using lots of RAM.'\n          - alert: node_swap_using_80percent\n            expr: avg_over_time(node_swap_usage_percent[2m]) > 80\n            for: 10m\n            labels:\n              
severity: page\n            annotations:\n              description: '{{$labels.alias}} is using 80% of its swap space for at least\n                10 minutes now.'\n              summary: '{{$labels.alias}}: Running out of swap soon.'\n          - alert: node_high_cpu_load\n            expr: node_load15 / on(alias) count(node_cpu{mode=\"system\"}) by (alias) >= 0\n            for: 1m\n            labels:\n              severity: warning\n            annotations:\n              description: '{{$labels.alias}} is running with load15 > 1 for at least 5 minutes: {{$value}}'\n              summary: '{{$labels.alias}}: Running on high load: {{$value}}'\n          - alert: node_high_memory_load\n            expr: avg_over_time(node_ram_usage_percent[2m]) > 85\n            for: 1m\n            labels:\n              severity: warning\n            annotations:\n              description: Host memory usage is {{ humanize $value }}%. Reported by\n                instance {{ $labels.instance }} of job {{ $labels.job }}.\n              summary: Server memory is almost full\n          - alert: node_high_storage_load\n            expr: avg_over_time(node_storage_usage_percent{mountpoint=\"/\"}[2m]) > 85\n            for: 30s\n            labels:\n              severity: warning\n            annotations:\n              description: Host storage usage is {{ humanize $value }}%. Reported by\n                instance {{ $labels.instance }} of job {{ $labels.job }}.\n              summary: Server storage is almost full\n          - alert: node_high_swap\n            expr: (node_memory_SwapTotal - node_memory_SwapFree) < (node_memory_SwapTotal\n              * 0.4)\n            for: 1m\n            labels:\n              severity: warning\n            annotations:\n              description: Host system has a high swap usage of {{ humanize $value }}. 
Reported\n                by instance {{ $labels.instance }} of job {{ $labels.job }}.\n              summary: Server has a high swap usage\n          - alert: node_high_network_drop_rcv\n            expr: node_network_receive_drop{device!=\"lo\"} > 3000\n            for: 30s\n            labels:\n              severity: warning\n            annotations:\n              description: Host system has an unusually high drop in network reception ({{\n                humanize $value }}). Reported by instance {{ $labels.instance }} of job {{\n                $labels.job }}\n              summary: Server has a high receive drop\n          - alert: node_high_network_drop_send\n            expr: node_network_transmit_drop{device!=\"lo\"} > 3000\n            for: 30s\n            labels:\n              severity: warning\n            annotations:\n              description: Host system has an unusually high drop in network transmission ({{\n                humanize $value }}). Reported by instance {{ $labels.instance }} of job {{\n                $labels.job }}\n              summary: Server has a high transmit drop\n          - alert: node_high_network_errs_rcv\n            expr: node_network_receive_errs{device!=\"lo\"} > 3000\n            for: 30s\n            labels:\n              severity: warning\n            annotations:\n              description: Host system has an unusually high error rate in network reception\n                ({{ humanize $value }}). Reported by instance {{ $labels.instance }} of job\n                {{ $labels.job }}\n              summary: Server has unusual high reception errors\n          - alert: node_high_network_errs_send\n            expr: node_network_transmit_errs{device!=\"lo\"} > 3000\n            for: 30s\n            labels:\n              severity: warning\n            annotations:\n              description: Host system has an unusually high error rate in network transmission\n                ({{ humanize $value }}). 
Reported by instance {{ $labels.instance }} of job\n                {{ $labels.job }}\n              summary: Server has unusual high transmission errors\n          - alert: node_network_conntrack_usage_80percent\n            expr: sort(node_nf_conntrack_entries{job=\"node-exporter\"} > node_nf_conntrack_entries_limit{job=\"node-exporter\"}  * 0.8)\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.instance}} has network conntrack entries of {{ $value }} which is more than 80% of maximum limit'\n              summary: '{{$labels.instance}}: available network conntrack entries are low.'\n          - alert: node_entropy_available_low\n            expr: node_entropy_available_bits < 300\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.instance}} has available entropy bits of {{ $value }} which is less than required of 300'\n              summary: '{{$labels.instance}}: is low on entropy bits.'\n          - alert: node_hwmon_high_cpu_temp\n            expr: node_hwmon_temp_crit_celsius*0.9 - node_hwmon_temp_celsius < 0 OR node_hwmon_temp_max_celsius*0.95 - node_hwmon_temp_celsius < 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.alias}} reports hwmon sensor {{$labels.sensor}}/{{$labels.chip}} temperature value is nearly critical: {{$value}}'\n              summary: '{{$labels.alias}}: Sensor {{$labels.sensor}}/{{$labels.chip}} temp is high: {{$value}}'\n          - alert: node_vmstat_paging_rate_high\n            expr: irate(node_vmstat_pgpgin[5m]) > 80\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.alias}} has a memory paging rate of change higher than 80%: {{$value}}'\n              summary: '{{$labels.alias}}: memory 
paging rate is high: {{$value}}'\n          - alert: node_xfs_block_allocation_high\n            expr: 100*(node_xfs_extent_allocation_blocks_allocated_total{job=\"node-exporter\", instance=~\"172.17.0.1.*\"} / (node_xfs_extent_allocation_blocks_freed_total{job=\"node-exporter\", instance=~\"172.17.0.1.*\"} + node_xfs_extent_allocation_blocks_allocated_total{job=\"node-exporter\", instance=~\"172.17.0.1.*\"})) > 80\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.alias}} has xfs allocation blocks higher than 80%: {{$value}}'\n              summary: '{{$labels.alias}}: xfs block allocation high: {{$value}}'\n          - alert: node_network_bond_slaves_down\n            expr: node_net_bonding_slaves - node_net_bonding_slaves_active > 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: '{{ $labels.master }} is missing {{ $value }} slave interface(s).'\n              summary: 'Instance {{ $labels.instance }}: {{ $labels.master }} missing {{ $value }} slave interface(s)'\n          - alert: node_numa_memory_used\n            expr: 100*node_memory_numa_MemUsed / node_memory_numa_MemTotal > 80\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.alias}} has more than 80% NUMA memory usage: {{ $value }}'\n              summary: '{{$labels.alias}}: has high NUMA memory usage: {{$value}}'\n          - alert: node_ntp_clock_skew_high\n            expr: abs(node_ntp_drift_seconds) > 2\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.alias}} has time difference of more than 2 seconds compared to NTP server: {{ $value }}'\n              summary: '{{$labels.alias}}: time is skewed by : {{$value}} seconds'\n          - alert: node_disk_read_latency\n  
          expr: (rate(node_disk_read_time_ms[5m]) / rate(node_disk_reads_completed[5m])) > 40\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.device}} has a high read latency of {{ $value }}'\n              summary: 'High read latency observed for device {{ $labels.device }}'\n          - alert: node_disk_write_latency\n            expr: (rate(node_disk_write_time_ms[5m]) / rate(node_disk_writes_completed[5m])) > 40\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: '{{$labels.device}} has a high write latency of {{ $value }}'\n              summary: 'High write latency observed for device {{ $labels.device }}'\n...\n"
  },
  {
    "path": "values_overrides/prometheus/openstack.yaml",
    "content": "---\nconf:\n  prometheus:\n    rules:\n      openstack:\n        groups:\n        - name: mariadb.rules\n          rules:\n          - alert: prom_exporter_mariadb_openstack_unavailable\n            expr: avg_over_time(up{job=\"mysql-exporter\",kubernetes_namespace=\"openstack\"}[5m]) == 0\n            for: 5m\n            labels:\n              severity: warning\n            annotations:\n              description: MariaDB exporter in  {{ $labels.kubernetes_namespace }} is not collecting metrics or is not available for past 10 minutes\n              title: MariaDB exporter is not collecting metrics or is not available\n          - alert: prom_exporter_mariadb_osh_infra_unavailable\n            expr: avg_over_time(up{job=\"mysql-exporter\",kubernetes_namespace=\"osh-infra\"}[5m]) == 0\n            for: 5m\n            labels:\n              severity: warning\n            annotations:\n              description: MariaDB exporter in  {{ $labels.kubernetes_namespace }} is not collecting metrics or is not available for past 10 minutes\n              title: MariaDB exporter is not collecting metrics or is not available\n          - alert: mariadb_table_lock_wait_high\n            expr: 100 * mysql_global_status_table_locks_waited/(mysql_global_status_table_locks_waited + mysql_global_status_table_locks_immediate) > 30\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'Mariadb has high table lock waits of {{ $value }} percentage'\n              summary: 'Mariadb table lock waits are high'\n          - alert: mariadb_node_not_ready\n            expr: mysql_global_status_wsrep_ready != 1\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: '{{$labels.job}} on {{$labels.instance}} is not ready.'\n              summary: 'Galera cluster node not ready'\n          - alert: mariadb_galera_node_out_of_sync\n  
          expr: mysql_global_status_wsrep_local_state != 4 AND mysql_global_variables_wsrep_desync == 0\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: '{{$labels.job}} on {{$labels.instance}} is not in sync ({{$value}} != 4)'\n              summary: 'Galera cluster node out of sync'\n          - alert: mariadb_innodb_replication_fallen_behind\n            expr: (mysql_global_variables_innodb_replication_delay > 30) AND on (instance) (predict_linear(mysql_global_variables_innodb_replication_delay[5m], 60*2) > 0)\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'The mysql innodb replication has fallen behind and is not recovering'\n              summary: 'MySQL innodb replication is lagging'\n        - name: openstack.rules\n          rules:\n          - alert: prom_exporter_openstack_unavailable\n            expr: avg_over_time(up{job=\"openstack-metrics\"}[5m]) == 0\n            for: 5m\n            labels:\n              severity: warning\n            annotations:\n              description: Openstack exporter is not collecting metrics or is not available for past 10 minutes\n              title: Openstack exporter is not collecting metrics or is not available\n          - alert: os_glance_api_availability\n            expr: openstack_check_glance_api != 1\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'Glance API is not available at {{$labels.url}} for more than 5 minutes'\n              summary: 'Glance API is not available at {{$labels.url}}'\n          - alert: os_nova_api_availability\n            expr: openstack_check_nova_api != 1\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'Nova API is not available at {{$labels.url}} for more than 5 
minutes'\n              summary: 'Nova API is not available at {{$labels.url}}'\n          - alert: os_keystone_api_availability\n            expr: openstack_check_keystone_api != 1\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'Keystone API is not available at {{$labels.url}} for more than 5 minutes'\n              summary: 'Keystone API is not available at {{$labels.url}}'\n          - alert: os_neutron_api_availability\n            expr: openstack_check_neutron_api != 1\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'Neutron API is not available at {{$labels.url}} for more than 5 minutes'\n              summary: 'Neutron API is not available at {{$labels.url}}'\n          - alert: os_neutron_metadata_agent_availability\n            expr: openstack_services_neutron_metadata_agent_down_total > 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'One or more neutron metadata_agents are not available for more than 5 minutes'\n              summary: 'One or more neutron metadata_agents are not available'\n          - alert: os_neutron_openvswitch_agent_availability\n            expr: openstack_services_neutron_openvswitch_agent_down_total > 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'One or more neutron openvswitch agents are not available for more than 5 minutes'\n              summary: 'One or more neutron openvswitch agents are not available'\n          - alert: os_neutron_dhcp_agent_availability\n            expr: openstack_services_neutron_dhcp_agent_down_total > 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'One or more neutron dhcp agents are not available for more 
than 5 minutes'\n              summary: 'One or more neutron dhcp agents are not available'\n          - alert: os_neutron_l3_agent_availability\n            expr: openstack_services_neutron_l3_agent_down_total > 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'One or more neutron L3 agents are not available for more than 5 minutes'\n              summary: 'One or more neutron L3 agents are not available'\n          - alert: os_swift_api_availability\n            expr: openstack_check_swift_api != 1\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'Swift API is not available at {{$labels.url}} for more than 5 minutes'\n              summary: 'Swift API is not available at {{$labels.url}}'\n          - alert: os_cinder_api_availability\n            expr: openstack_check_cinder_api != 1\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'Cinder API is not available at {{$labels.url}} for more than 5 minutes'\n              summary: 'Cinder API is not available at {{$labels.url}}'\n          - alert: os_cinder_scheduler_availability\n            expr: openstack_services_cinder_cinder_scheduler != 1\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'Cinder scheduler is not available for more than 5 minutes'\n              summary: 'Cinder scheduler is not available'\n          - alert: os_heat_api_availability\n            expr: openstack_check_heat_api != 1\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'Heat API is not available at {{$labels.url}} for more than 5 minutes'\n              summary: 'Heat API is not available at {{$labels.url}}'\n          - alert: os_nova_compute_disabled\n 
           expr: openstack_services_nova_compute_disabled_total > 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'nova-compute is disabled on certain hosts for more than 5 minutes'\n              summary: 'Openstack compute service nova-compute is disabled on some hosts'\n          - alert: os_nova_conductor_disabled\n            expr: openstack_services_nova_conductor_disabled_total > 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'nova-conductor is disabled on certain hosts for more than 5 minutes'\n              summary: 'Openstack compute service nova-conductor is disabled on some hosts'\n          - alert: os_nova_consoleauth_disabled\n            expr: openstack_services_nova_consoleauth_disabled_total > 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'nova-consoleauth is disabled on certain hosts for more than 5 minutes'\n              summary: 'Openstack compute service nova-consoleauth is disabled on some hosts'\n          - alert: os_nova_scheduler_disabled\n            expr: openstack_services_nova_scheduler_disabled_total > 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'nova-scheduler is disabled on certain hosts for more than 5 minutes'\n              summary: 'Openstack compute service nova-scheduler is disabled on some hosts'\n          - alert: os_nova_compute_down\n            expr: openstack_services_nova_compute_down_total > 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'nova-compute is down on certain hosts for more than 5 minutes'\n              summary: 'Openstack compute service nova-compute is down on some hosts'\n          - alert: 
os_nova_conductor_down\n            expr: openstack_services_nova_conductor_down_total > 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'nova-conductor is down on certain hosts for more than 5 minutes'\n              summary: 'Openstack compute service nova-conductor is down on some hosts'\n          - alert: os_nova_consoleauth_down\n            expr: openstack_services_nova_consoleauth_down_total > 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'nova-consoleauth is down on certain hosts for more than 5 minutes'\n              summary: 'Openstack compute service nova-consoleauth is down on some hosts'\n          - alert: os_nova_scheduler_down\n            expr: openstack_services_nova_scheduler_down_total > 0\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'nova-scheduler is down on certain hosts for more than 5 minutes'\n              summary: 'Openstack compute service nova-scheduler is down on some hosts'\n          - alert: os_vm_vcpu_usage_high\n            expr: openstack_total_used_vcpus * 100/(openstack_total_used_vcpus + openstack_total_free_vcpus) > 80\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'Openstack VM vcpu usage is high at {{$value}} percent'\n              summary: 'Openstack VM vcpu usage is high'\n          - alert: os_vm_ram_usage_high\n            expr: openstack_total_used_ram_MB * 100/(openstack_total_used_ram_MB + openstack_total_free_ram_MB) > 80\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'Openstack VM RAM usage is high at {{$value}} percent'\n              summary: 'Openstack VM RAM usage is high'\n          - alert: 
os_vm_disk_usage_high\n            expr: openstack_total_used_disk_GB * 100/ ( openstack_total_used_disk_GB + openstack_total_free_disk_GB ) > 80\n            for: 5m\n            labels:\n              severity: page\n            annotations:\n              description: 'Openstack VM Disk usage is high at {{$value}} percent'\n              summary: 'Openstack VM Disk usage is high'\n        - name: rabbitmq.rules\n          rules:\n          - alert: rabbitmq_network_pratitions_detected\n            expr: min(partitions) by(instance) > 0\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'RabbitMQ at {{ $labels.instance }} has {{ $value }} partitions'\n              summary: 'RabbitMQ Network partitions detected'\n          - alert: rabbitmq_down\n            expr: min(rabbitmq_up) by(instance) != 1\n            for: 10m\n            labels:\n              severity: page\n            annotations:\n              description: 'RabbitMQ Server instance {{ $labels.instance }} is down'\n              summary: 'The RabbitMQ Server instance at {{ $labels.instance }} has been down the last 10 mins'\n          - alert: rabbitmq_file_descriptor_usage_high\n            expr: fd_used * 100 /fd_total > 80\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'RabbitMQ Server instance {{ $labels.instance }} has high file descriptor usage of {{ $value }} percent.'\n              summary: 'RabbitMQ file descriptors usage is high for last 10 mins'\n          - alert: rabbitmq_node_disk_free_alarm\n            expr: node_disk_free_alarm > 0\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'RabbitMQ Server instance {{ $labels.instance }} has low disk free space available.'\n              summary: 'RabbitMQ disk space usage is high'\n          - 
alert: rabbitmq_node_memory_alarm\n            expr: node_mem_alarm > 0\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'RabbitMQ Server instance {{ $labels.instance }} has low free memory.'\n              summary: 'RabbitMQ memory usage is high'\n          - alert: rabbitmq_less_than_3_nodes\n            expr: running < 3\n            for: 10m\n            labels:\n              severity: warning\n            annotations:\n              description: 'RabbitMQ Server has less than 3 nodes running.'\n              summary: 'RabbitMQ server is at risk of losing data'\n          - alert: rabbitmq_queue_messages_returned_high\n            expr: queue_messages_returned_total/queue_messages_published_total * 100 > 50\n            for: 5m\n            labels:\n              severity: warning\n            annotations:\n              description: 'RabbitMQ Server is returning more than 50 percent of messages received.'\n              summary: 'RabbitMQ server is returning more than 50 percent of messages received.'\n          - alert: rabbitmq_consumers_low_utilization\n            expr: queue_consumer_utilisation < .4\n            for: 5m\n            labels:\n              severity: warning\n            annotations:\n              description: 'RabbitMQ consumers message consumption speed is low'\n              summary: 'RabbitMQ consumers message consumption speed is low'\n          - alert: rabbitmq_high_message_load\n            expr: queue_messages_total > 17000 or increase(queue_messages_total[5m]) > 4000\n            for: 5m\n            labels:\n              severity: warning\n            annotations:\n              description: 'RabbitMQ has high message load. Total Queue depth > 17000 or growth more than 4000 messages.'\n              summary: 'RabbitMQ has high message load'\n...\n"
  },
  {
    "path": "values_overrides/prometheus/postgresql.yaml",
    "content": "---\nconf:\n  prometheus:\n    rules:\n      postgresql:\n        groups:\n        - name: postgresql.rules\n          rules:\n          - alert: prom_exporter_postgresql_unavailable\n            expr: avg_over_time(up{job=\"postgresql-exporter\"}[5m]) == 0\n            for: 5m\n            labels:\n              severity: warning\n            annotations:\n              description: postgresql exporter is not collecting metrics or is not available for past 10 minutes\n              title: postgresql exporter is not collecting metrics or is not available\n          - alert: pg_replication_fallen_behind\n            expr: (pg_replication_lag > 120) and ON(instance) (pg_replication_is_replica ==  1)\n            for: 5m\n            labels:\n              severity: warning\n            annotations:\n              description: Replication lag on server {{$labels.instance}} is currently {{$value | humanizeDuration }}\n              title: Postgres Replication lag is over 2 minutes\n          - alert: pg_connections_too_high\n            expr: sum(pg_stat_activity_count) BY (environment, fqdn) > ON(fqdn) pg_settings_max_connections * 0.95\n            for: 5m\n            labels:\n              severity: warn\n              channel: database\n            annotations:\n              title: Postgresql has {{$value}} connections on {{$labels.fqdn}} which is close to the maximum\n          - alert: pg_deadlocks_detected\n            expr: sum by(datname) (rate(pg_stat_database_deadlocks[1m])) > 0\n            for: 5m\n            labels:\n              severity: warn\n            annotations:\n              description: postgresql at {{$labels.instance}} is showing {{$value}} rate of deadlocks for database {{$labels.datname}}\n              title: Postgres server is experiencing deadlocks\n...\n"
  },
  {
    "path": "values_overrides/prometheus/tls.yaml",
    "content": "---\nendpoints:\n  monitoring:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: prometheus-tls-api\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    scheme:\n      default: \"https\"\n    port:\n      http:\n        default: 443\nnetwork:\n  prometheus:\n    ingress:\n      annotations:\n        nginx.ingress.kubernetes.io/backend-protocol: https\nconf:\n  httpd: |\n    ServerRoot \"/usr/local/apache2\"\n    Listen 443\n    LoadModule mpm_event_module modules/mod_mpm_event.so\n    LoadModule authn_file_module modules/mod_authn_file.so\n    LoadModule authn_core_module modules/mod_authn_core.so\n    LoadModule authz_host_module modules/mod_authz_host.so\n    LoadModule authz_groupfile_module modules/mod_authz_groupfile.so\n    LoadModule authz_user_module modules/mod_authz_user.so\n    LoadModule authz_core_module modules/mod_authz_core.so\n    LoadModule access_compat_module modules/mod_access_compat.so\n    LoadModule auth_basic_module modules/mod_auth_basic.so\n    LoadModule ldap_module modules/mod_ldap.so\n    LoadModule authnz_ldap_module modules/mod_authnz_ldap.so\n    LoadModule reqtimeout_module modules/mod_reqtimeout.so\n    LoadModule filter_module modules/mod_filter.so\n    LoadModule proxy_html_module modules/mod_proxy_html.so\n    LoadModule log_config_module modules/mod_log_config.so\n    LoadModule env_module modules/mod_env.so\n    LoadModule headers_module modules/mod_headers.so\n    LoadModule setenvif_module modules/mod_setenvif.so\n    LoadModule version_module modules/mod_version.so\n    LoadModule proxy_module modules/mod_proxy.so\n    LoadModule proxy_connect_module modules/mod_proxy_connect.so\n    LoadModule proxy_http_module modules/mod_proxy_http.so\n    LoadModule proxy_balancer_module modules/mod_proxy_balancer.so\n    LoadModule slotmem_shm_module modules/mod_slotmem_shm.so\n    LoadModule slotmem_plain_module modules/mod_slotmem_plain.so\n    
LoadModule unixd_module modules/mod_unixd.so\n    LoadModule status_module modules/mod_status.so\n    LoadModule autoindex_module modules/mod_autoindex.so\n    LoadModule ssl_module modules/mod_ssl.so\n\n    <IfModule unixd_module>\n    User daemon\n    Group daemon\n    </IfModule>\n\n    <Directory />\n        AllowOverride none\n        Require all denied\n    </Directory>\n\n    <Files \".ht*\">\n        Require all denied\n    </Files>\n\n    ErrorLog /dev/stderr\n\n    LogLevel warn\n\n    <IfModule log_config_module>\n        LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" combined\n        LogFormat \"%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" proxy\n        LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b\" common\n\n        <IfModule logio_module>\n          LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\" %I %O\" combinedio\n        </IfModule>\n\n        SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded\n        CustomLog /dev/stdout common\n        CustomLog /dev/stdout combined\n        CustomLog /dev/stdout proxy env=forwarded\n    </IfModule>\n\n    <Directory \"/usr/local/apache2/cgi-bin\">\n        AllowOverride None\n        Options None\n        Require all granted\n    </Directory>\n\n    <IfModule headers_module>\n        RequestHeader unset Proxy early\n    </IfModule>\n\n    <IfModule proxy_html_module>\n    Include conf/extra/proxy-html.conf\n    </IfModule>\n\n    <VirtualHost *:443>\n      # Expose metrics to all users, as this is not sensitive information and\n      # circumvents the inability of Prometheus to interpolate environment vars\n      # in its configuration file\n      <Location /metrics>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/metrics\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/metrics\n          Satisfy Any\n          Allow from all\n      </Location>\n      # Expose the /federate endpoint to all users, as this is also not\n      # sensitive information and circumvents the inability of Prometheus to\n      # interpolate environment vars in its configuration file\n      <Location /federate>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/federate\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/federate\n          Satisfy Any\n          Allow from all\n      </Location>\n      # Restrict general user (LDAP) access to the /graph endpoint, as general trusted\n      # users should only be able to query Prometheus for metrics and not have access\n      # to information like targets, configuration, flags or build info for Prometheus\n      <Location />\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file ldap\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}\n          AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}\n          AuthLDAPURL {{ tuple \"ldap\" \"default\" \"ldap\" . 
| include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | quote }}\n          Require valid-user\n      </Location>\n      <Location /graph>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/graph\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/graph\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file ldap\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}\n          AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}\n          AuthLDAPURL {{ tuple \"ldap\" \"default\" \"ldap\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | quote }}\n          Require valid-user\n      </Location>\n      # Restrict access to the /config (dashboard) and /api/v1/status/config (http) endpoints\n      # to the admin user\n      <Location /config>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/config\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/config\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      <Location /api/v1/status/config>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/status/config\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/status/config\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      # Restrict access to the /flags (dashboard) and /api/v1/status/flags (http) endpoints\n      # to the admin user\n      <Location /flags>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/flags\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/flags\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      <Location /api/v1/status/flags>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/status/flags\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/status/flags\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      # Restrict access to the /status (dashboard) endpoint to the admin user\n      <Location /status>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/status\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/status\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      # Restrict access to the /rules (dashboard) endpoint to the admin user\n      <Location /rules>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/rules\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/rules\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      # Restrict access to the /targets (dashboard) and /api/v1/targets (http) endpoints\n      # to the admin user\n      <Location /targets>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/targets\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/targets\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      <Location /api/v1/targets>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/targets\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/targets\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      # Restrict access to the /api/v1/admin/tsdb/ endpoints (http) to the admin user.\n      # These endpoints are disabled by default, but are included here to ensure only\n      # an admin user has access to these endpoints when enabled\n      <Location /api/v1/admin/tsdb/>\n          ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/admin/tsdb/\n          ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/admin/tsdb/\n          AuthName \"Prometheus\"\n          AuthType Basic\n          AuthBasicProvider file\n          AuthUserFile /usr/local/apache2/conf/.htpasswd\n          Require valid-user\n      </Location>\n      SSLEngine On\n      SSLProxyEngine on\n      SSLCertificateFile      /etc/prometheus/certs/tls.crt\n      SSLCertificateKeyFile   /etc/prometheus/certs/tls.key\n      SSLProtocol             all -SSLv3 -TLSv1 -TLSv1.1\n      SSLCipherSuite          ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256\n      SSLHonorCipherOrder     on\n    </VirtualHost>\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/prometheus-alertmanager/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    server:\n      container:\n        prometheus_alertmanager:\n          appArmorProfile:\n            type: RuntimeDefault\n        prometheus_alertmanager_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n        apache_proxy:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/prometheus-alertmanager/gateway.yaml",
    "content": "# Gateway API overrides for Prometheus Alertmanager.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  alertmanager:\n    host_fqdn_override:\n      public:\n        host: alertmanager.openstack-helm.org\n\nmanifests:\n  ingress: false\n  service_ingress: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: alertmanager-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.alertmanager.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: alerts-engine\n              port: 9093\n...\n"
  },
  {
    "path": "values_overrides/prometheus-blackbox-exporter/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    prometheus_blackbox_exporter:\n      container:\n        blackbox_exporter:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/prometheus-kube-state-metrics/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    exporter:\n      container:\n        kube_state_metrics:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/prometheus-mysql-exporter/2024.2-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n    ks_user: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/prometheus-mysql-exporter/2025.1-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/prometheus-mysql-exporter/2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/prometheus-mysql-exporter/2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/prometheus-mysql-exporter/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    prometheus_mysql_exporter:\n      container:\n        exporter:\n          appArmorProfile:\n            type: RuntimeDefault\n    prometheus_create_mysql_user:\n      container:\n        main:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/prometheus-mysql-exporter/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/prometheus-mysql-exporter/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_mysql_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/prometheus-mysql-exporter/prometheus.yaml",
    "content": "---\nmonitoring:\n  prometheus:\n    enabled: true\nmanifests:\n  monitoring:\n    prometheus:\n      configmap_bin: true\n      deployment_exporter: true\n      job_user_create: true\n      secret_etc: true\n      service_exporter: true\n      network_policy_exporter: true\n...\n"
  },
  {
    "path": "values_overrides/prometheus-mysql-exporter/tls.yaml",
    "content": "---\nendpoints:\n  oslo_db:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: mariadb-tls-direct\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/prometheus-node-exporter/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    metrics:\n      container:\n        node_exporter:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/prometheus-openstack-exporter/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    exporter:\n      container:\n        openstack_metrics_exporter:\n          appArmorProfile:\n            type: RuntimeDefault\n    ks_user:\n      container:\n        prometheus_openstack_exporter_ks_user:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/prometheus-openstack-exporter/netpol.yaml",
    "content": "---\nmanifests:\n  network_policy: true\n...\n"
  },
  {
    "path": "values_overrides/prometheus-openstack-exporter/tls.yaml",
    "content": "---\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/prometheus-process-exporter/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    metrics:\n      container:\n        process_exporter:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/rabbitmq/2024.2-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_rabbitmq_exporter_helm_tests: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n    rabbitmq_init: quay.io/airshipit/heat:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/rabbitmq/2025.1-ubuntu_jammy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_rabbitmq_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    rabbitmq_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/rabbitmq/2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_rabbitmq_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbitmq_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/rabbitmq/2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_rabbitmq_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    rabbitmq_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/rabbitmq/apparmor.yaml",
    "content": "---\npod:\n  security_context:\n    cluster_wait:\n      container:\n        rabbitmq_cluster_wait:\n          appArmorProfile:\n            type: RuntimeDefault\n        rabbitmq_cookie:\n          appArmorProfile:\n            type: RuntimeDefault\n    server:\n      container:\n        rabbitmq:\n          appArmorProfile:\n            type: RuntimeDefault\n        rabbitmq_perms:\n          appArmorProfile:\n            type: RuntimeDefault\n        rabbitmq_cookie:\n          appArmorProfile:\n            type: RuntimeDefault\n        rabbitmq_password:\n          appArmorProfile:\n            type: RuntimeDefault\n    exporter:\n      container:\n        rabbitmq_exporter:\n          appArmorProfile:\n            type: RuntimeDefault\n    test:\n      container:\n        rabbitmq_test:\n          appArmorProfile:\n            type: RuntimeDefault\n    kubernetes_entrypoint:\n      container:\n        kubernetes_entrypoint:\n          appArmorProfile:\n            type: RuntimeDefault\n...\n"
  },
  {
    "path": "values_overrides/rabbitmq/builtin-metrics.yaml",
    "content": "---\n# This enable Rabbitmq built-in prometheus plugin\nconf:\n  enabled_plugins:\n    - rabbitmq_management\n    - rabbitmq_peer_discovery_k8s\n    - rabbitmq_prometheus\n\nmanifests:\n  monitoring:\n    prometheus:\n      configmap_bin: false\n      deployment_exporter: false\n      service_exporter: false\n      network_policy_exporter: false\n...\n"
  },
  {
    "path": "values_overrides/rabbitmq/gateway.yaml",
    "content": "# Gateway API overrides for RabbitMQ.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  oslo_messaging:\n    host_fqdn_override:\n      public:\n        host: rabbitmq.openstack-helm.org\n\nmanifests:\n  ingress_management: false\n  service_ingress_management: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: rabbitmq-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.oslo_messaging.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: rabbitmq\n              port: 15672\n...\n"
  },
  {
    "path": "values_overrides/rabbitmq/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_rabbitmq_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    rabbitmq_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/rabbitmq/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nimages:\n  tags:\n    prometheus_rabbitmq_exporter_helm_tests: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    rabbitmq_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/rabbitmq/netpol.yaml",
    "content": "---\nnetwork_policy:\n  rabbitmq:\n    ingress:\n      - from:\n        - podSelector:\n            matchLabels:\n              application: keystone\n        - podSelector:\n            matchLabels:\n              application: heat\n        - podSelector:\n            matchLabels:\n              application: glance\n        - podSelector:\n            matchLabels:\n              application: cinder\n        - podSelector:\n            matchLabels:\n              application: aodh\n        - podSelector:\n            matchLabels:\n              application: barbican\n        - podSelector:\n            matchLabels:\n              application: ceilometer\n        - podSelector:\n            matchLabels:\n              application: designate\n        - podSelector:\n            matchLabels:\n              application: ironic\n        - podSelector:\n            matchLabels:\n              application: magnum\n        - podSelector:\n            matchLabels:\n              application: mistral\n        - podSelector:\n            matchLabels:\n              application: nova\n        - podSelector:\n            matchLabels:\n              application: neutron\n        - podSelector:\n            matchLabels:\n              application: senlin\n        - podSelector:\n            matchLabels:\n              application: placement\n        - podSelector:\n            matchLabels:\n              application: rabbitmq\n        - podSelector:\n            matchLabels:\n              application: prometheus_rabbitmq_exporter\n        ports:\n          # AMQP port\n          - protocol: TCP\n            port: 5672\n          # HTTP API ports\n          - protocol: TCP\n            port: 15672\n          - protocol: TCP\n            port: 80\n      - from:\n        - podSelector:\n            matchLabels:\n              application: rabbitmq\n        ports:\n          # Clustering port AMQP + 20000\n          - protocol: TCP\n            port: 25672\n          # Erlang Port Mapper Daemon (epmd)\n          - protocol: TCP\n            port: 4369\n    egress:\n      - to:\n        - podSelector:\n            matchLabels:\n              application: rabbitmq\n        ports:\n          # Erlang port mapper daemon (epmd)\n          - protocol: TCP\n            port: 4369\n          # Rabbit clustering port AMQP + 20000\n          - protocol: TCP\n            port: 25672\n          # NOTE(lamt): Set by inet_dist_listen_{min/max}. Firewalls must\n          # permit traffic in this range to pass between clustered nodes.\n          # - protocol: TCP\n          #  port: 35197\n      - to:\n        - ipBlock:\n            cidr: %%%REPLACE_API_ADDR%%%/32\n        ports:\n          - protocol: TCP\n            port: %%%REPLACE_API_PORT%%%\n\nmanifests:\n  monitoring:\n    prometheus:\n      network_policy_exporter: true\n  network_policy: true\n...\n"
  },
  {
    "path": "values_overrides/rabbitmq/rabbitmq-exporter.yaml",
    "content": "---\n# This enable external pod for rabbitmq-exporter\nmanifests:\n  monitoring:\n    prometheus:\n      configmap_bin: true\n      deployment_exporter: true\n      service_exporter: true\n      network_policy_exporter: false\n...\n"
  },
  {
    "path": "values_overrides/rabbitmq/tls.yaml",
    "content": "---\nconf:\n  rabbitmq:\n    ssl_options:\n      cacertfile: \"/etc/rabbitmq/certs/ca.crt\"\n      certfile: \"/etc/rabbitmq/certs/tls.crt\"\n      keyfile: \"/etc/rabbitmq/certs/tls.key\"\n      verify: verify_peer\n      fail_if_no_peer_cert: false\n    management:\n      ssl:\n        cacertfile: \"/etc/rabbitmq/certs/ca.crt\"\n        certfile: \"/etc/rabbitmq/certs/tls.crt\"\n        keyfile: \"/etc/rabbitmq/certs/tls.key\"\nendpoints:\n  oslo_messaging:\n    host_fqdn_override:\n      default:\n        tls:\n          secretName: rabbitmq-tls-direct\n          issuerRef:\n            name: ca-issuer\n            kind: ClusterIssuer\n    port:\n      https:\n        default: 15680\n        public: 443\nmanifests:\n  certificates: true\n...\n"
  },
  {
    "path": "values_overrides/rally/annotations.yaml",
    "content": "---\nannotations:\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      rally:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/rally/mariadb-operator.yaml",
    "content": "---\nconf:\n  rally:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  rally_api:\n    - rally-db-conn\n  rally_db_sync:\n    - rally-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: rally\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: rally\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: rally-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: rally-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"rally\"\n      table: \"*\"\n      username: rally\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: rally-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: rally\n      passwordSecretKeyRef:\n        name: rally-db-password\n        key: password\n      database: rally\n      secretName: rally-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/rally/tls-offloading.yaml",
    "content": "---\nendpoints:\n  identity:\n    auth:\n      admin:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      rally:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n      test:\n        cacert: /etc/ssl/certs/openstack-helm.crt\n\ntls:\n  identity: true\n...\n"
  },
  {
    "path": "values_overrides/skyline/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    skyline_db_sync: quay.io/airshipit/skyline:2025.1-ubuntu_jammy\n    skyline: quay.io/airshipit/skyline:2025.1-ubuntu_jammy\n    skyline_nginx: quay.io/airshipit/skyline:2025.1-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy'\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/skyline/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    skyline_db_sync: quay.io/airshipit/skyline:2025.2-ubuntu_noble\n    skyline: quay.io/airshipit/skyline:2025.2-ubuntu_noble\n    skyline_nginx: quay.io/airshipit/skyline:2025.2-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy'\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/skyline/gateway.yaml",
    "content": "# Gateway API overrides for Skyline.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  skyline:\n    host_fqdn_override:\n      public:\n        host: skyline.openstack-helm.org\n\nmanifests:\n  ingress: false\n  service_ingress: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: skyline-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.skyline.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: skyline-api\n              port: 9999\n...\n"
  },
  {
    "path": "values_overrides/skyline/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    skyline_db_sync: quay.io/airshipit/skyline:2025.2-ubuntu_noble_loci\n    skyline: quay.io/airshipit/skyline:2025.2-ubuntu_noble_loci\n    skyline_nginx: quay.io/airshipit/skyline:2025.2-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy'\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/swift/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    test: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    swift_proxy: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    swift_account: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    swift_container: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    swift_object: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    swift_storage: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    swift_storage_init: quay.io/airshipit/swift:2025.2-ubuntu_noble\n    swift_ring_builder: quay.io/airshipit/swift:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/swift/gateway.yaml",
    "content": "# Gateway API overrides for Swift.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  object_store:\n    host_fqdn_override:\n      public:\n        host: swift.openstack-helm.org\n\nmanifests:\n  ingress_proxy: false\n  service_ingress_proxy: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: swift-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.object_store.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: swift-proxy\n              port: 8080\n...\n"
  },
  {
    "path": "values_overrides/tacker/2024.2-ubuntu_jammy.yaml",
    "content": "# Default values for tacker.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nimages:\n  tags:\n    tacker_server: quay.io/airshipit/tacker:2024.2-ubuntu_jammy\n    tacker_conductor: quay.io/airshipit/tacker:2024.2-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    tacker_db_sync: quay.io/airshipit/tacker:2024.2-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/tacker/2025.1-ubuntu_jammy.yaml",
    "content": "# Default values for tacker.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n---\nimages:\n  tags:\n    tacker_server: quay.io/airshipit/tacker:2025.1-ubuntu_jammy\n    tacker_conductor: quay.io/airshipit/tacker:2025.1-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    tacker_db_sync: quay.io/airshipit/tacker:2025.1-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/tacker/2025.1-ubuntu_noble.yaml",
    "content": "# Default values for tacker.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n---\nimages:\n  tags:\n    tacker_server: quay.io/airshipit/tacker:2025.1-ubuntu_noble\n    tacker_conductor: quay.io/airshipit/tacker:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    tacker_db_sync: quay.io/airshipit/tacker:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/tacker/2025.2-ubuntu_noble.yaml",
    "content": "# Default values for tacker.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n---\nimages:\n  tags:\n    tacker_server: quay.io/airshipit/tacker:2025.2-ubuntu_noble\n    tacker_conductor: quay.io/airshipit/tacker:2025.2-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    tacker_db_sync: quay.io/airshipit/tacker:2025.2-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/tacker/annotations.yaml",
    "content": "---\nannotations:\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      tacker:\n        custom.tld/key: \"value\"\n    tls:\n      nfv_orchestration_api_public:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/tacker/gateway.yaml",
    "content": "# Gateway API overrides for Tacker.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  nfv_orchestration:\n    host_fqdn_override:\n      public:\n        host: tacker.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: tacker-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.nfv_orchestration.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: tacker-api\n              port: 9890\n...\n"
  },
  {
    "path": "values_overrides/tacker/loci-2025.1-ubuntu_noble.yaml",
    "content": "# Default values for tacker.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n---\nimages:\n  tags:\n    tacker_server: quay.io/airshipit/tacker:2025.1-ubuntu_noble_loci\n    tacker_conductor: quay.io/airshipit/tacker:2025.1-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    tacker_db_sync: quay.io/airshipit/tacker:2025.1-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/tacker/loci-2025.2-ubuntu_noble.yaml",
    "content": "# Default values for tacker.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n---\nimages:\n  tags:\n    tacker_server: quay.io/airshipit/tacker:2025.2-ubuntu_noble_loci\n    tacker_conductor: quay.io/airshipit/tacker:2025.2-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    tacker_db_sync: quay.io/airshipit/tacker:2025.2-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/tacker/mariadb-operator.yaml",
    "content": "---\nconf:\n  tacker:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  tacker_api:\n    - tacker-db-conn\n  tacker_db_sync:\n    - tacker-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: tacker\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: tacker\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: tacker-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: tacker-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"tacker\"\n      table: \"*\"\n      username: tacker\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: tacker-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: tacker\n      passwordSecretKeyRef:\n        name: tacker-db-password\n        key: password\n      database: tacker\n      secretName: tacker-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/tempest/annotations.yaml",
    "content": "---\nannotations:\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      tempest:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/trove/2024.2-ubuntu_jammy.yaml",
    "content": "# Default values for trove.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    db_init: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    trove_db_sync: quay.io/airshipit/trove:2024.2-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    trove_api: quay.io/airshipit/trove:2024.2-ubuntu_jammy\n    trove_conductor: quay.io/airshipit/trove:2024.2-ubuntu_jammy\n    trove_taskmanager: quay.io/airshipit/trove:2024.2-ubuntu_jammy\n    trove_db_purge: quay.io/airshipit/trove:2024.2-ubuntu_jammy\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n...\n"
  },
  {
    "path": "values_overrides/trove/2025.1-ubuntu_jammy.yaml",
    "content": "# Default values for trove.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    trove_db_sync: quay.io/airshipit/trove:2025.1-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    trove_api: quay.io/airshipit/trove:2025.1-ubuntu_jammy\n    trove_conductor: quay.io/airshipit/trove:2025.1-ubuntu_jammy\n    trove_taskmanager: quay.io/airshipit/trove:2025.1-ubuntu_jammy\n    trove_db_purge: quay.io/airshipit/trove:2025.1-ubuntu_jammy\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n...\n"
  },
  {
    "path": "values_overrides/trove/2025.1-ubuntu_noble.yaml",
    "content": "# Default values for trove.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    trove_db_sync: quay.io/airshipit/trove:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    trove_api: quay.io/airshipit/trove:2025.1-ubuntu_noble\n    trove_conductor: quay.io/airshipit/trove:2025.1-ubuntu_noble\n    trove_taskmanager: quay.io/airshipit/trove:2025.1-ubuntu_noble\n    trove_db_purge: quay.io/airshipit/trove:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n...\n"
  },
  {
    "path": "values_overrides/trove/2025.2-ubuntu_noble.yaml",
    "content": "# Default values for trove.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n---\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    trove_db_sync: quay.io/airshipit/trove:2025.2-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    trove_api: quay.io/airshipit/trove:2025.2-ubuntu_noble\n    trove_conductor: quay.io/airshipit/trove:2025.2-ubuntu_noble\n    trove_taskmanager: quay.io/airshipit/trove:2025.2-ubuntu_noble\n    trove_db_purge: quay.io/airshipit/trove:2025.2-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n...\n"
  },
  {
    "path": "values_overrides/trove/annotations.yaml",
    "content": "---\nannotations:\n  secret:\n    default:\n      custom.tld/key: \"value\"\n      custom.tld/key2: \"value2\"\n    identity:\n      admin:\n        another.tld/foo: \"bar\"\n    oci_image_registry:\n      trove:\n        custom.tld/key: \"value\"\n    tls:\n      database_api_public:\n        custom.tld/key: \"value\"\n...\n"
  },
  {
    "path": "values_overrides/trove/gateway.yaml",
    "content": "# Gateway API overrides for Trove.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  database:\n    host_fqdn_override:\n      public:\n        host: trove.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: trove-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.database.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: trove-api\n              port: 8779\n...\n"
  },
  {
    "path": "values_overrides/trove/mariadb-operator.yaml",
    "content": "---\nconf:\n  trove:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  trove_api:\n    - trove-db-conn\n  trove_db_sync:\n    - trove-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: trove\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: trove\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: trove-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: trove-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"trove\"\n      table: \"*\"\n      username: trove\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: trove-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: trove\n      passwordSecretKeyRef:\n        name: trove-db-password\n        key: password\n      database: trove\n      secretName: trove-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/watcher/2024.2-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2024.2-ubuntu_jammy\n    watcher_api: quay.io/airshipit/watcher:2024.2-ubuntu_jammy\n    watcher_decision_engine: quay.io/airshipit/watcher:2024.2-ubuntu_jammy\n    watcher_applier: quay.io/airshipit/watcher:2024.2-ubuntu_jammy\n    watcher_db_sync: quay.io/airshipit/watcher:2024.2-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/watcher/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    watcher_api: quay.io/airshipit/watcher:2025.1-ubuntu_jammy\n    watcher_decision_engine: quay.io/airshipit/watcher:2025.1-ubuntu_jammy\n    watcher_applier: quay.io/airshipit/watcher:2025.1-ubuntu_jammy\n    watcher_db_sync: quay.io/airshipit/watcher:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/watcher/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    watcher_api: quay.io/airshipit/watcher:2025.1-ubuntu_noble\n    watcher_decision_engine: quay.io/airshipit/watcher:2025.1-ubuntu_noble\n    watcher_applier: quay.io/airshipit/watcher:2025.1-ubuntu_noble\n    watcher_db_sync: quay.io/airshipit/watcher:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/watcher/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    watcher_api: quay.io/airshipit/watcher:2025.2-ubuntu_noble\n    watcher_decision_engine: quay.io/airshipit/watcher:2025.2-ubuntu_noble\n    watcher_applier: quay.io/airshipit/watcher:2025.2-ubuntu_noble\n    watcher_db_sync: quay.io/airshipit/watcher:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/watcher/gateway.yaml",
    "content": "# Gateway API overrides for Watcher.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  infra_optim:\n    host_fqdn_override:\n      public:\n        host: watcher.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: watcher-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.infra_optim.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: watcher-api\n              port: 9322\n...\n"
  },
  {
    "path": "values_overrides/watcher/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    watcher_api: quay.io/airshipit/watcher:2025.1-ubuntu_noble_loci\n    watcher_decision_engine: quay.io/airshipit/watcher:2025.1-ubuntu_noble_loci\n    watcher_applier: quay.io/airshipit/watcher:2025.1-ubuntu_noble_loci\n    watcher_db_sync: quay.io/airshipit/watcher:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/watcher/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    watcher_api: quay.io/airshipit/watcher:2025.2-ubuntu_noble_loci\n    watcher_decision_engine: quay.io/airshipit/watcher:2025.2-ubuntu_noble_loci\n    watcher_applier: quay.io/airshipit/watcher:2025.2-ubuntu_noble_loci\n    watcher_db_sync: quay.io/airshipit/watcher:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/watcher/mariadb-operator.yaml",
    "content": "---\nconf:\n  watcher:\n    database:\n      connection: null\n\nmanifests:\n  job_db_init: false\n\netcSources:\n  watcher_api:\n    - watcher-db-conn\n  watcher_db_sync:\n    - watcher-db-conn\n\nextraObjects:\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Database\n    metadata:\n      name: watcher\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      characterSet: utf8\n      collate: utf8_general_ci\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: User\n    metadata:\n      name: watcher\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      passwordSecretKeyRef:\n        name: watcher-db-password\n        key: password\n      # This field is immutable and defaults to 10, 0 means unlimited.\n      maxUserConnections: 0\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Grant\n    metadata:\n      name: watcher-grant\n      namespace: openstack\n    spec:\n      mariaDbRef:\n        name: mariadb  # name of the MariaDB kind\n        waitForIt: true\n      privileges:\n        - \"ALL\"\n      database: \"watcher\"\n      table: \"*\"\n      username: watcher\n      grantOption: false\n      host: \"%\"\n      retryInterval: 5s\n  - apiVersion: k8s.mariadb.com/v1alpha1\n    kind: Connection\n    metadata:\n      name: watcher-db-conn\n    spec:\n      mariaDbRef:\n        name: mariadb\n      username: watcher\n      passwordSecretKeyRef:\n        name: watcher-db-password\n        key: password\n      database: watcher\n      secretName: watcher-db-conn\n      secretTemplate:\n        key: db_conn.conf\n        format: |\n          [database]\n          connection = mysql+pymysql://{{ .Username }}:{{ .Password }}@{{ .Host }}:{{ .Port }}/{{ .Database }}{{ .Params }}\n      healthCheck:\n        interval: 30s\n        retryInterval: 3s\n      serviceName: mariadb\n...\n"
  },
  {
    "path": "values_overrides/zaqar/2025.1-ubuntu_jammy.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    zaqar_api: quay.io/airshipit/zaqar:2025.1-ubuntu_jammy\n    zaqar_db_sync: quay.io/airshipit/zaqar:2025.1-ubuntu_jammy\n...\n"
  },
  {
    "path": "values_overrides/zaqar/2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    zaqar_api: quay.io/airshipit/zaqar:2025.1-ubuntu_noble\n    zaqar_db_sync: quay.io/airshipit/zaqar:2025.1-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/zaqar/2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble\n    zaqar_api: quay.io/airshipit/zaqar:2025.2-ubuntu_noble\n    zaqar_db_sync: quay.io/airshipit/zaqar:2025.2-ubuntu_noble\n...\n"
  },
  {
    "path": "values_overrides/zaqar/gateway.yaml",
    "content": "# Gateway API overrides for Zaqar.\n#\n# Public endpoints use *.openstack-helm.org FQDNs which are resolved by\n# dnsmasq to the MetalLB Gateway VIP. In-cluster pods reach the Gateway\n# through this DNS path so no ExternalName services are needed.\n---\nendpoints:\n  messaging:\n    host_fqdn_override:\n      public:\n        host: zaqar.openstack-helm.org\n\nmanifests:\n  ingress_api: false\n  service_ingress_api: false\n\nextraObjects:\n  - apiVersion: gateway.networking.k8s.io/v1\n    kind: HTTPRoute\n    metadata:\n      name: zaqar-route\n      namespace: openstack\n    spec:\n      hostnames:\n        - \"{{ .Values.endpoints.messaging.host_fqdn_override.public.host }}\"\n      parentRefs:\n        - name: gateway-default\n          namespace: envoy-gateway-system\n      rules:\n        - matches:\n            - path:\n                type: PathPrefix\n                value: /\n          backendRefs:\n            - name: zaqar-api\n              port: 8888\n...\n"
  },
  {
    "path": "values_overrides/zaqar/loci-2025.1-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble_loci\n    zaqar_api: quay.io/airshipit/zaqar:2025.1-ubuntu_noble_loci\n    zaqar_db_sync: quay.io/airshipit/zaqar:2025.1-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "values_overrides/zaqar/loci-2025.2-ubuntu_noble.yaml",
    "content": "---\nimages:\n  tags:\n    bootstrap: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    db_init: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    db_drop: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_user: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_service: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.2-ubuntu_noble_loci\n    zaqar_api: quay.io/airshipit/zaqar:2025.2-ubuntu_noble_loci\n    zaqar_db_sync: quay.io/airshipit/zaqar:2025.2-ubuntu_noble_loci\n...\n"
  },
  {
    "path": "watcher/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack-Helm Watcher\nname: watcher\nversion: 2025.2.0\nhome: https://docs.openstack.org/watcher/latest/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Watcher/OpenStack_Project_Watcher_vertical.png\nsources:\n  - https://opendev.org/openstack/watcher\n  - https://opendev.org/openstack/openstack-helm\nmaintainers:\n  - name: OpenStack-Helm Authors\n\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "watcher/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "watcher/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n\nwatcher-db-manage --config-file /etc/watcher/watcher.conf upgrade"
  },
  {
    "path": "watcher/templates/bin/_watcher-api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec watcher-api \\\n        --config-file /etc/watcher/watcher.conf\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "watcher/templates/bin/_watcher-applier.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexec watcher-applier \\\n      --config-file /etc/watcher/watcher.conf\n"
  },
  {
    "path": "watcher/templates/bin/_watcher-decision-engine.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nexec watcher-decision-engine \\\n      --config-file /etc/watcher/watcher.conf\n"
  },
  {
    "path": "watcher/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $rallyTests := .Values.conf.rally_tests }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: watcher-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n{{- if .Values.bootstrap.enabled }}\n  bootstrap.sh: |\n{{ tuple \"bin/_bootstrap.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n{{- end }}\n  rally-test.sh: |\n{{ tuple $rallyTests | include \"helm-toolkit.scripts.rally_test\" | indent 4 }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  watcher-api.sh: |\n{{ tuple \"bin/_watcher-api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  watcher-decision-engine.sh: |\n{{ tuple \"bin/_watcher-decision-engine.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  watcher-applier.sh: |\n{{ tuple \"bin/_watcher-applier.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  rabbit-init.sh: |\n{{- include \"helm-toolkit.scripts.rabbit_init\" . | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.watcher.keystone_authtoken.auth_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.watcher.keystone_authtoken \"auth_uri\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.watcher.keystone_authtoken.auth_url -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.watcher.keystone_authtoken \"auth_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.watcher.keystone_authtoken.region_name -}}\n{{- $_ := set .Values.conf.watcher.keystone_authtoken \"region_name\" .Values.endpoints.identity.auth.watcher.region_name -}}\n{{- end -}}\n{{- if empty .Values.conf.watcher.keystone_authtoken.project_name -}}\n{{- $_ := set .Values.conf.watcher.keystone_authtoken \"project_name\" .Values.endpoints.identity.auth.watcher.project_name -}}\n{{- end -}}\n{{- if empty .Values.conf.watcher.keystone_authtoken.project_domain_name -}}\n{{- $_ := set .Values.conf.watcher.keystone_authtoken \"project_domain_name\" .Values.endpoints.identity.auth.watcher.project_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.watcher.keystone_authtoken.user_domain_name -}}\n{{- $_ := set .Values.conf.watcher.keystone_authtoken \"user_domain_name\" .Values.endpoints.identity.auth.watcher.user_domain_name -}}\n{{- end -}}\n{{- if empty .Values.conf.watcher.keystone_authtoken.username -}}\n{{- $_ := set .Values.conf.watcher.keystone_authtoken \"username\" .Values.endpoints.identity.auth.watcher.username -}}\n{{- end -}}\n{{- if empty .Values.conf.watcher.keystone_authtoken.password -}}\n{{- $_ := set .Values.conf.watcher.keystone_authtoken \"password\" .Values.endpoints.identity.auth.watcher.password -}}\n{{- end -}}\n\n{{- if empty .Values.conf.watcher.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.watcher.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.watcher.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.watcher.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if and (not (kindIs \"invalid\" .Values.conf.watcher.database.connection)) (empty .Values.conf.watcher.database.connection) -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"watcher\" \"mysql\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\"| set .Values.conf.watcher.database \"connection\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.watcher.DEFAULT.transport_url -}}\n{{- $_ := tuple \"oslo_messaging\" \"internal\" \"watcher\" \"amqp\" . | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | set .Values.conf.watcher.DEFAULT \"transport_url\" -}}\n{{- end -}}\n\n{{- if empty .Values.conf.watcher.api.port -}}\n{{- $_ := tuple \"infra-optim\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" | set .Values.conf.watcher.api \"port\" -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .Release.Name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: watcher-etc\ntype: Opaque\ndata:\n  rally_tests.yaml: {{ toYaml .Values.conf.rally_tests.tests | b64enc }}\n  watcher.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.watcher | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n{{- range $key, $value := $envAll.Values.conf.rally_tests.templates }}\n  {{ printf \"test_template_%d\" $key }}: {{ $value.template | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . }}\n\n{{- $mounts_watcher_api := .Values.pod.mounts.watcher_api.watcher_api }}\n{{- $mounts_watcher_api_init := .Values.pod.mounts.watcher_api.init_container }}\n\n{{- $serviceAccountName := \"watcher-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: watcher-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"watcher\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"watcher\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"watcher\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"watcher\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_watcher_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: watcher-api\n{{ tuple $envAll \"watcher_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            runAsUser: {{ .Values.pod.user.watcher.uid }}\n          command:\n            - /tmp/watcher-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/watcher-api.sh\n                  - stop\n          ports:\n            - name: w-api\n              containerPort: {{ tuple \"infra-optim\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          readinessProbe:\n            httpGet:\n              scheme: {{ tuple \"infra-optim\" \"service\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_scheme_lookup\" | upper }}\n              path: /\n              port: {{ tuple \"infra-optim\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.watcher.oslo_concurrency.lock_path }}\n            - name: pod-etc-watcher\n              mountPath: /etc/watcher\n            - name: watcher-bin\n              mountPath: /tmp/watcher-api.sh\n              subPath: watcher-api.sh\n              readOnly: true\n            - name: watcher-etc\n              mountPath: /etc/watcher/watcher.conf\n              subPath: watcher.conf\n              readOnly: true\n            - name: watcher-etc\n              mountPath: {{ .Values.conf.watcher.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.watcher.DEFAULT.log_config_append }}\n              readOnly: true\n            - name: watcher-etc\n              mountPath: /etc/watcher/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{ if $mounts_watcher_api.volumeMounts }}{{ toYaml $mounts_watcher_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-watcher\n          emptyDir: {}\n        - name: watcher-bin\n          configMap:\n            name: watcher-bin\n            defaultMode: 0555\n        - name: watcher-etc\n          secret:\n            secretName: watcher-etc\n            defaultMode: 0444\n{{ if $mounts_watcher_api.volumes }}{{ toYaml $mounts_watcher_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/deployment-applier.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.deployment_applier }}\n{{- $envAll := . }}\n\n{{- $mounts_watcher_applier := .Values.pod.mounts.watcher_applier.watcher_applier }}\n{{- $mounts_watcher_applier_init := .Values.pod.mounts.watcher_applier.init_container }}\n\n{{- $serviceAccountName := \"watcher-applier\" }}\n{{ tuple $envAll \"applier\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: watcher-applier\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"watcher\" \"applier\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.applier }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"watcher\" \"applier\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"watcher\" \"applier\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll \"watcher\" \"applier\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.applier.node_selector_key }}: {{ .Values.labels.applier.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"applier\" $mounts_watcher_applier_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: watcher-applier\n{{ tuple $envAll \"watcher_applier\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.applier | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            runAsUser: {{ .Values.pod.user.watcher.uid }}\n          command:\n            - /tmp/watcher-applier.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.watcher.oslo_concurrency.lock_path }}\n            - name: pod-etc-watcher\n              mountPath: /etc/watcher\n            - name: watcher-bin\n              mountPath: /tmp/watcher-applier.sh\n              subPath: watcher-applier.sh\n              readOnly: true\n            - name: watcher-etc\n              mountPath: /etc/watcher/watcher.conf\n              subPath: watcher.conf\n              readOnly: true\n            - name: watcher-etc\n              mountPath: {{ .Values.conf.watcher.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.watcher.DEFAULT.log_config_append }}\n              readOnly: true\n{{ if $mounts_watcher_applier.volumeMounts }}{{ toYaml $mounts_watcher_applier.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-watcher\n          emptyDir: {}\n        - name: watcher-bin\n          configMap:\n            name: watcher-bin\n            defaultMode: 0555\n        - name: watcher-etc\n          secret:\n            secretName: watcher-etc\n            defaultMode: 0444\n{{ if $mounts_watcher_applier.volumes }}{{ toYaml $mounts_watcher_applier.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "watcher/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $ingressOpts := dict \"envAll\" . \"backendServiceType\" \"infra-optim\" \"backendPort\" \"w-api\" -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"watcher\" \"keystoneUser\" .Values.bootstrap.ks_user \"logConfigFile\" .Values.conf.watcher.DEFAULT.log_config_append -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"watcher\" -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"watcher\" -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"watcher\" -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"watcher\" -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"watcher\" \"serviceTypes\" ( tuple \"infra-optim\" ) -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"watcher\" \"serviceTypes\" ( tuple \"infra-optim\" ) -}}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"watcher\" -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/job-rabbit-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_rabbit_init }}\n{{- $rmqUserJob := dict \"envAll\" . \"serviceName\" \"watcher\" -}}\n{{ $rmqUserJob | include \"helm-toolkit.manifests.job_rabbit_init\" }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/network_policy.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"watcher\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "watcher/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: watcher-api\nspec:\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"watcher\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/pod-rally-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pod_rally_test }}\n{{- $envAll := . }}\n\n{{- $mounts_tests := .Values.pod.mounts.watcher_tests.watcher_tests }}\n{{- $mounts_tests_init := .Values.pod.mounts.watcher_tests.init_container }}\n\n{{- $serviceAccountName := print $envAll.Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ print $envAll.Release.Name \"-test\" }}\n  labels:\n{{ tuple $envAll \"watcher\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\nspec:\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n  restartPolicy: Never\n  serviceAccountName: {{ $serviceAccountName }}\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n    - name: {{ .Release.Name }}-test-ks-user\n{{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      command:\n        - /tmp/ks-user.sh\n      volumeMounts:\n  
      - name: watcher-bin\n          mountPath: /tmp/ks-user.sh\n          subPath: ks-user.sh\n          readOnly: true\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_SERVICE_NAME\n          value: \"test\"\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_ROLE\n          value: {{ .Values.endpoints.identity.auth.test.role | quote }}\n  containers:\n    - name: {{ .Release.Name }}-test\n{{ tuple $envAll \"test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: RALLY_ENV_NAME\n          value: {{.Release.Name}}\n      command:\n        - /tmp/rally-test.sh\n      volumeMounts:\n        - name: watcher-etc\n          mountPath: /etc/rally/rally_tests.yaml\n          subPath: rally_tests.yaml\n          readOnly: true\n        - name: watcher-bin\n          mountPath: /tmp/rally-test.sh\n          subPath: rally-test.sh\n          readOnly: true\n        - name: rally-db\n          mountPath: /var/lib/rally\n        {{- range $key, $value := $envAll.Values.conf.rally_tests.templates }}\n        - name: watcher-etc\n          mountPath: {{ $value.name }}\n          subPath: {{ printf \"test_template_%d\" $key }}\n          readOnly: true\n        {{- end }}\n{{ if 
$mounts_tests.volumeMounts }}{{ toYaml $mounts_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: watcher-etc\n      secret:\n        secretName: watcher-etc\n        defaultMode: 0444\n    - name: watcher-bin\n      configMap:\n        name: watcher-bin\n        defaultMode: 0555\n    - name: rally-db\n      emptyDir: {}\n{{ if $mounts_tests.volumes }}{{ toYaml $mounts_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"watcher\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  DB_CONNECTION: {{ tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"watcher\" \"test\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/secret-rabbitmq.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_rabbitmq }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"watcher\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_messaging $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\ntype: Opaque\ndata:\n  RABBITMQ_CONNECTION: {{ tuple \"oslo_messaging\" \"internal\" $userClass \"http\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" | b64enc }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"infra-optim\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: w-api\n      port: {{ tuple \"infra-optim\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n    {{ if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n    {{ end }}\n  selector:\n{{ tuple $envAll \"watcher\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{ if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{ end }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"infra-optim\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "watcher/templates/statefulset-decision-engine.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.statefulset_decision_engine }}\n{{- $envAll := . }}\n\n{{- $mounts_watcher_decision_engine := .Values.pod.mounts.watcher_decision_engine.watcher_decision_engine }}\n{{- $mounts_watcher_decision_engine_init := .Values.pod.mounts.watcher_decision_engine.init_container }}\n\n{{- $serviceAccountName := \"watcher-decision-engine\" }}\n{{ tuple $envAll \"decision_engine\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: watcher-decision-engine\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"watcher\" \"decision-engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  serviceName: watcher-decision-engine\n  replicas: {{ .Values.pod.replicas.engine }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"watcher\" \"decision-engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"watcher\" \"decision-engine\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n    spec:\n      serviceAccountName: {{ $serviceAccountName }}\n      affinity:\n{{ tuple $envAll 
\"watcher\" \"decision-engine\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.decision_engine.node_selector_key }}: {{ .Values.labels.decision_engine.node_selector_value }}\n      initContainers:\n{{ tuple $envAll \"decision-engine\" $mounts_watcher_decision_engine_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: watcher-decision-engine\n{{ tuple $envAll \"watcher_decision_engine\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.decision_engine | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n          securityContext:\n            runAsUser: {{ .Values.pod.user.watcher.uid }}\n          command:\n            - /tmp/watcher-decision-engine.sh\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.watcher.oslo_concurrency.lock_path }}\n            - name: pod-etc-watcher\n              mountPath: /etc/watcher\n            - name: watcher-bin\n              mountPath: /tmp/watcher-decision-engine.sh\n              subPath: watcher-decision-engine.sh\n              readOnly: true\n            - name: watcher-etc\n              mountPath: /etc/watcher/watcher.conf\n              subPath: watcher.conf\n              readOnly: true\n            - name: watcher-etc\n              mountPath: {{ .Values.conf.watcher.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.watcher.DEFAULT.log_config_append }}\n              readOnly: true\n{{ if $mounts_watcher_decision_engine.volumeMounts }}{{ toYaml $mounts_watcher_decision_engine.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: 
pod-etc-watcher\n          emptyDir: {}\n        - name: watcher-bin\n          configMap:\n            name: watcher-bin\n            defaultMode: 0555\n        - name: watcher-etc\n          secret:\n            secretName: watcher-etc\n            defaultMode: 0444\n{{ if $mounts_watcher_decision_engine.volumes }}{{ toYaml $mounts_watcher_decision_engine.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "watcher/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default values for watcher\n# This is a YAML-formatted file.\n# Declare name/value pairs to be passed into your templates.\n# name: value\n\n---\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  decision_engine:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  applier:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nrelease_group: null\n\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    watcher_db_sync: quay.io/airshipit/watcher:2025.1-ubuntu_noble\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    rabbit_init: docker.io/rabbitmq:3.13-management\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble\n    watcher_api: quay.io/airshipit/watcher:2025.1-ubuntu_noble\n    
watcher_decision_engine: quay.io/airshipit/watcher:2025.1-ubuntu_noble\n    watcher_applier: quay.io/airshipit/watcher:2025.1-ubuntu_noble\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        haproxy.org/path-rewrite: /\n    node_port:\n      enabled: false\n      port: 31233\n\nbootstrap:\n  enabled: false\n  ks_user: watcher\n  script: |\n    openstack token issue\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - watcher-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - watcher-db-sync\n        - watcher-ks-user\n        - watcher-ks-endpoints\n        - watcher-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - watcher-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    decision_engine:\n      jobs:\n        - watcher-db-sync\n        - watcher-ks-user\n        - watcher-ks-endpoints\n        - watcher-rabbit-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    applier:\n      jobs:\n        - watcher-db-sync\n        - watcher-ks-user\n        - watcher-ks-endpoints\n        - watcher-rabbit-init\n      services:\n        - endpoint: 
internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    ks_endpoints:\n      jobs:\n        - watcher-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n    rabbit_init:\n      services:\n        - service: oslo_messaging\n          endpoint: internal\n    tests:\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: infra-optim\n    image_repo_sync:\n      services:\n        - endpoint: internal\n          service: local_image_registry\n\n# Names of secrets used by bootstrap and environmental checks\nsecrets:\n  identity:\n    admin: watcher-keystone-admin\n    watcher: watcher-keystone-user\n    test: watcher-keystone-test\n  oslo_db:\n    admin: watcher-db-admin\n    watcher: watcher-db-user\n  oslo_messaging:\n    admin: watcher-rabbitmq-admin\n    watcher: watcher-rabbitmq-user\n  oci_image_registry:\n    watcher: watcher-oci-image-registry\n\n# typically overridden by environmental\n# values, but should include all endpoints\n# required by this chart\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n    hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  oci_image_registry:\n    name: oci-image-registry\n    namespace: oci-image-registry\n    auth:\n      enabled: false\n      watcher:\n        username: watcher\n        password: password\n    hosts:\n      default: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        default: null\n  identity:\n    name: keystone\n    
auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      watcher:\n        role: admin\n        region_name: RegionOne\n        username: watcher\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      test:\n        role: admin\n        region_name: RegionOne\n        username: test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  infra_optim:\n    name: watcher\n    hosts:\n      default: watcher-api\n      public: watcher\n    host_fqdn_override:\n      default: null\n    path:\n      default: /\n    scheme:\n      default: 'http'\n    port:\n      api:\n        default: 9322\n        public: 80\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n      watcher:\n        username: watcher\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /watcher\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_messaging:\n    auth:\n      admin:\n        username: rabbitmq\n        password: password\n      watcher:\n        username: watcher\n        password: password\n    hosts:\n      default: rabbitmq\n    host_fqdn_override:\n      default: null\n    path: /watcher\n    scheme: rabbit\n    port:\n      amqp:\n        default: 5672\n      http:\n        default: 15672\n  oslo_cache:\n    auth:\n      # NOTE(portdirect): this is used to define the value for keystone\n      # 
authtoken cache encryption key, if not set it will be populated\n      # automatically with a random value, but to take advantage of\n      # this feature all services should be set to use the same key,\n      # and memcache service.\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n    port:\n      memcache:\n        default: 11211\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: 'http'\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n\nconf:\n  rally_tests:\n    run_tempest: false\n    tests: {}\n    templates: []\n  policy: {}\n  watcher:\n    DEFAULT:\n      log_config_append: /etc/watcher/logging.conf\n      transport_url: null\n    api:\n      host: '0.0.0.0'\n    database:\n      max_retries: -1\n      # -- Database connection URI. When empty the URI is auto-generated\n      ## from endpoints.oslo_db. Set to null to disable auto-generation,\n      ## e.g. 
when using an operator such as mariadb-operator that supplies\n      ## the connection string via a mounted configuration snippet.\n      connection: \"\"\n    keystone_authtoken:\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n    watcher_clients_auth:\n      cafile: null\n      certfile: null\n      keyfile: null\n      insecure: false\n    oslo_concurrency:\n      lock_path: /var/lock\n  logging:\n    loggers:\n      keys:\n        - root\n        - watcher\n    handlers:\n      keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: 'null'\n    logger_watcher:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: watcher\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n\npod:\n  user:\n    watcher:\n      uid: 1000\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: 
kubernetes.io/hostname\n  mounts:\n    watcher_api:\n      init_container: null\n      watcher_api:\n        volumeMounts:\n        volumes:\n    watcher_decision_engine:\n      init_container: null\n      watcher_decision_engine:\n        volumeMounts:\n        volumes:\n    watcher_applier:\n      init_container: null\n      watcher_applier:\n        volumeMounts:\n        volumes:\n    watcher_tests:\n      init_container: null\n      watcher_tests:\n        volumeMounts:\n        volumes:\n  replicas:\n    api: 1\n    decision_engine: 1\n    applier: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    decision_engine:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    applier:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          
memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      rabbit_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\nnetwork_policy:\n  watcher:\n    ingress:\n      - from:\n        - podSelector:\n            matchLabels:\n              application: watcher\n        - podSelector:\n            matchLabels:\n              application: horizon\n        - podSelector:\n            matchLabels:\n              application: ingress\n        - podSelector:\n            matchLabels:\n              application: heat\n        ports:\n        - protocol: TCP\n          port: 80\n        - protocol: TCP\n          port: 9322\n\nmanifests:\n  configmap_bin: true\n  configmap_etc: true\n  deployment_api: true\n  deployment_applier: true\n  ingress_api: true\n  job_bootstrap: true\n  job_db_init: true\n  job_db_sync: true\n  job_db_drop: false\n  job_image_repo_sync: true\n  job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  job_rabbit_init: true\n  pdb_api: true\n  pod_rally_test: true\n  network_policy: false\n  secret_db: true\n  secret_keystone: 
true\n  secret_rabbitmq: true\n  secret_registry: true\n  service_ingress_api: true\n  service_api: true\n  statefulset_decision_engine: true\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "yamllint-templates.conf",
    "content": "---\n\nyaml-files:\n- '*.yaml'\n- '*.yml'\n- '.yamllint'\n\nrules:\n  braces: enable\n  brackets: enable\n  colons: enable\n  commas: enable\n  comments: enable\n  comments-indentation: enable\n  document-end: disable\n  document-start: enable\n  empty-lines: disable\n  empty-values: disable\n  hyphens: enable\n  indentation:\n    spaces: 2\n    indent-sequences: whatever\n  key-duplicates: enable\n  key-ordering: disable\n  line-length: disable\n  new-line-at-end-of-file: disable\n  new-lines: disable\n  octal-values: disable\n  quoted-strings: disable\n  trailing-spaces: disable\n  truthy: disable\n...\n"
  },
  {
    "path": "yamllint.conf",
    "content": "---\n\nyaml-files:\n- '*.yaml'\n- '*.yml'\n- '.yamllint'\n\nrules:\n  braces: enable\n  brackets: enable\n  colons: enable\n  commas: enable\n  comments: enable\n  comments-indentation: disable\n  document-end: enable\n  document-start: enable\n  empty-lines: enable\n  empty-values: disable\n  hyphens:\n    ignore: .yamllint/zuul.d/jobs.yaml\n  indentation:\n    spaces: 2\n    indent-sequences: whatever\n  key-duplicates: enable\n  key-ordering: disable\n  line-length: disable\n  new-line-at-end-of-file: enable\n  new-lines: enable\n  octal-values: disable\n  quoted-strings: disable\n  trailing-spaces: enable\n  truthy: disable\n...\n"
  },
  {
    "path": "zaqar/Chart.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\napiVersion: v2\nappVersion: v1.0.0\ndescription: OpenStack Messaging Service (Zaqar)\nname: zaqar\ntype: application\nversion: 2025.2.0\nhome: https://docs.openstack.org/zaqar/\nicon: https://www.openstack.org/themes/openstack/images/project-mascots/Zaqar/OpenStack_Project_Zaqar_vertical.png\nsources:\n  - https://opendev.org/openstack/zaqar\nkeywords:\n  - openstack\n  - messaging\n  - queue\n  - helm\nmaintainers:\n  - name: OpenStack-Helm Team\n    email: openstack-helm@lists.openstack.org\ndependencies:\n  - name: helm-toolkit\n    repository: file://../helm-toolkit\n    version: \">= 0.1.0\"\n...\n"
  },
  {
    "path": "zaqar/templates/bin/_bootstrap.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\n{{ .Values.bootstrap.script | default \"echo 'Not Enabled'\" }}\n"
  },
  {
    "path": "zaqar/templates/bin/_db-sync.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n\nset -ex\n\nzaqar-sql-db-manage \\\n  --config-file /etc/zaqar/zaqar.conf \\\n  upgrade head\n"
  },
  {
    "path": "zaqar/templates/bin/_zaqar-test.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n*/}}\n\nset -ex\nexport HOME=/tmp\n\necho \"Test: list queues\"\nopenstack queue list\n\nQUEUE_NAME=\"test-queue-$(uuidgen | cut -d'-' -f1)\"\n\necho \"Test: create queue\"\nopenstack queue create ${QUEUE_NAME}\n\necho \"Test: post messages\"\nopenstack message post ${QUEUE_NAME} --message '{\"body\":\"Hello World 1\"}'\nopenstack message post ${QUEUE_NAME} --message '{\"body\":\"Hello World 2\"}'\n\necho \"Test: list messages\"\nopenstack message list ${QUEUE_NAME}\n\necho \"Test: get a single message\"\nMESSAGE_ID=$(openstack message list ${QUEUE_NAME} -f value -c id | head -1)\nopenstack message get ${QUEUE_NAME} ${MESSAGE_ID}\n\necho \"Test: claim messages\"\nCLAIM_ID=$(openstack claim create ${QUEUE_NAME} --ttl 30 --grace 30 -f value -c id)\nopenstack claim get ${QUEUE_NAME} ${CLAIM_ID}\n\necho \"Test: delete messages\"\nopenstack message delete ${QUEUE_NAME} ${MESSAGE_ID}\n\necho \"Test: delete queue\"\nopenstack queue delete ${QUEUE_NAME}\n\nexit 0\n"
  },
  {
    "path": "zaqar/templates/bin/_zaqar_api.sh.tpl",
    "content": "#!/bin/bash\n\n{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\nset -ex\nCOMMAND=\"${@:-start}\"\n\nfunction start () {\n  exec zaqar-server \\\n        --config-file /etc/zaqar/zaqar.conf\n}\n\nfunction stop () {\n  kill -TERM 1\n}\n\n$COMMAND\n"
  },
  {
    "path": "zaqar/templates/configmap-bin.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_bin }}\n{{- $envAll := . }}\n{{- $rallyTests := .Values.conf.rally_tests }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: zaqar-bin\ndata:\n{{- if .Values.images.local_registry.active }}\n  image-repo-sync.sh: |\n{{- include \"helm-toolkit.scripts.image_repo_sync\" . | indent 4 }}\n{{- end }}\n  db-init.py: |\n{{- include \"helm-toolkit.scripts.db_init\" . | indent 4 }}\n  db-sync.sh: |\n{{ tuple \"bin/_db-sync.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  db-drop.py: |\n{{- include \"helm-toolkit.scripts.db_drop\" . | indent 4 }}\n  zaqar-api.sh: |\n{{ tuple \"bin/_zaqar_api.sh.tpl\" . | include \"helm-toolkit.utils.template\" | indent 4 }}\n  ks-service.sh: |\n{{- include \"helm-toolkit.scripts.keystone_service\" . | indent 4 }}\n  ks-endpoints.sh: |\n{{- include \"helm-toolkit.scripts.keystone_endpoints\" . | indent 4 }}\n  ks-user.sh: |\n{{- include \"helm-toolkit.scripts.keystone_user\" . | indent 4 }}\n  rally-test.sh: |\n{{ tuple $rallyTests | include \"helm-toolkit.scripts.rally_test\" | indent 4 }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/configmap-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.configmap_etc }}\n{{- $envAll := . }}\n\n{{- if empty .Values.conf.zaqar.keystone_authtoken.identity_uri -}}\n{{- $_ := tuple \"identity\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\"| set .Values.conf.zaqar.keystone_authtoken \"identity_uri\" -}}\n{{- end -}}\n{{- if empty .Values.conf.zaqar.keystone_authtoken.memcached_servers -}}\n{{- $_ := tuple \"oslo_cache\" \"internal\" \"memcache\" . | include \"helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup\" | set .Values.conf.zaqar.keystone_authtoken \"memcached_servers\" -}}\n{{- end -}}\n{{- if empty .Values.conf.zaqar.keystone_authtoken.memcache_secret_key -}}\n{{- $_ := set .Values.conf.zaqar.keystone_authtoken \"memcache_secret_key\" ( default ( randAlphaNum 64 ) .Values.endpoints.oslo_cache.auth.memcache_secret_key ) -}}\n{{- end -}}\n\n{{- if empty (index .Values.conf.zaqar \"drivers:management_store:sqlalchemy\").uri -}}\n{{- $_ := tuple \"oslo_db\" \"internal\" \"zaqar\" \"mysql\" . 
| include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\"| set (index .Values.conf.zaqar \"drivers:management_store:sqlalchemy\") \"uri\" -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.handler_fluent) (has \"fluent\" .Values.conf.logging.handlers.keys) -}}\n{{- $fluentd_host := tuple \"fluentd\" \"internal\" $envAll | include \"helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup\" }}\n{{- $fluentd_port := tuple \"fluentd\" \"internal\" \"service\" $envAll | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- $fluent_args := printf \"('%s.%s', '%s', %s)\" .Release.Namespace .Release.Name $fluentd_host $fluentd_port }}\n{{- $handler_fluent := dict \"class\" \"fluent.handler.FluentHandler\" \"formatter\" \"fluent\" \"args\" $fluent_args -}}\n{{- $_ := set .Values.conf.logging \"handler_fluent\" $handler_fluent -}}\n{{- end -}}\n\n{{- if and (empty .Values.conf.logging.formatter_fluent) (has \"fluent\" .Values.conf.logging.formatters.keys) -}}\n{{- $formatter_fluent := dict \"class\" \"oslo_log.formatters.FluentFormatter\" -}}\n{{- $_ := set .Values.conf.logging \"formatter_fluent\" $formatter_fluent -}}\n{{- end -}}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: zaqar-etc\ntype: Opaque\ndata:\n  rally_tests.yaml: {{ toYaml .Values.conf.rally_tests.tests | b64enc }}\n  zaqar.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.zaqar | b64enc }}\n  logging.conf: {{ include \"helm-toolkit.utils.to_oslo_conf\" .Values.conf.logging | b64enc }}\n  api-paste.ini: {{ include \"helm-toolkit.utils.to_ini\" .Values.conf.api_paste | b64enc }}\n  policy.yaml: {{ toYaml .Values.conf.policy | b64enc }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/deployment-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"zaqarApiLivenessProbeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"messaging\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- define \"zaqarApiReadinessProbeTemplate\" }}\ntcpSocket:\n  port: {{ tuple \"messaging\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{- end }}\n\n{{- if .Values.manifests.deployment_api }}\n{{- $envAll := . 
}}\n\n{{- $mounts_zaqar_api := .Values.pod.mounts.zaqar_api.zaqar_api }}\n{{- $mounts_zaqar_api_init := .Values.pod.mounts.zaqar_api.init_container }}\n\n{{- $serviceAccountName := \"zaqar-api\" }}\n{{ tuple $envAll \"api\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: zaqar-api\n  annotations:\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n  labels:\n{{ tuple $envAll \"zaqar\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n  replicas: {{ .Values.pod.replicas.api }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"zaqar\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{ tuple $envAll | include \"helm-toolkit.snippets.kubernetes_upgrades_deployment\" | indent 2 }}\n  template:\n    metadata:\n      labels:\n{{ tuple $envAll \"zaqar\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 8 }}\n      annotations:\n{{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" | indent 8 }}\n        configmap-bin-hash: {{ tuple \"configmap-bin.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n        configmap-etc-hash: {{ tuple \"configmap-etc.yaml\" . | include \"helm-toolkit.utils.hash\" }}\n{{ tuple \"zaqar_api\" . | include \"helm-toolkit.snippets.custom_pod_annotations\" | indent 8 }}\n{{ dict \"envAll\" $envAll \"podName\" \"zaqar-api\" \"containerNames\" (list \"zaqar-api\" \"init\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 8 }}\n    spec:\n{{ tuple \"zaqar_api\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 6 }}\n{{ tuple \"zaqar_api\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 6 }}\n      serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"zaqar\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 6 }}\n      affinity:\n{{ tuple $envAll \"zaqar\" \"api\" | include \"helm-toolkit.snippets.kubernetes_pod_anti_affinity\" | indent 8 }}\n      nodeSelector:\n        {{ .Values.labels.api.node_selector_key }}: {{ .Values.labels.api.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.zaqar.enabled }}\n{{ tuple $envAll \"zaqar\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 6 }}\n{{ end }}\n      terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default \"30\" }}\n      initContainers:\n{{ tuple $envAll \"api\" $mounts_zaqar_api_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 8 }}\n      containers:\n        - name: zaqar-api\n{{ tuple $envAll \"zaqar_api\" | include \"helm-toolkit.snippets.image\" | indent 10 }}\n{{ tuple $envAll $envAll.Values.pod.resources.api | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"application\" \"zaqar\" \"container\" \"zaqar_api\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 10 }}\n{{- if or .Values.manifests.certificates .Values.tls.identity }}\n          env:\n            - name: REQUESTS_CA_BUNDLE\n              value: \"/etc/zaqar/certs/ca.crt\"\n{{- end }}\n          command:\n            - /tmp/zaqar-api.sh\n            - start\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /tmp/zaqar-api.sh\n                  - stop\n          ports:\n            - name: z-api\n              containerPort: {{ tuple \"messaging\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"default\" \"type\" \"liveness\" \"probeTemplate\" (include \"zaqarApiLivenessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n{{ dict \"envAll\" $envAll \"component\" \"api\" \"container\" \"default\" \"type\" \"readiness\" \"probeTemplate\" (include \"zaqarApiReadinessProbeTemplate\" $envAll | fromYaml) | include \"helm-toolkit.snippets.kubernetes_probe\" | indent 10 }}\n          volumeMounts:\n            - name: pod-tmp\n              mountPath: /tmp\n            - name: oslo-lock-path\n              mountPath: {{ .Values.conf.zaqar.oslo_concurrency.lock_path }}\n            - name: pod-etc-zaqar\n              mountPath: /etc/zaqar\n            - name: zaqar-bin\n              mountPath: /tmp/zaqar-api.sh\n              subPath: zaqar-api.sh\n              readOnly: true\n            - name: zaqar-etc\n              mountPath: /etc/zaqar/zaqar.conf\n              subPath: zaqar.conf\n              readOnly: true\n            - name: zaqar-etc-snippets\n              mountPath: /etc/zaqar/zaqar.conf.d/\n              readOnly: true\n            {{- if .Values.conf.zaqar.DEFAULT.log_config_append }}\n            - name: zaqar-etc\n              mountPath: {{ .Values.conf.zaqar.DEFAULT.log_config_append }}\n              subPath: {{ base .Values.conf.zaqar.DEFAULT.log_config_append }}\n              readOnly: true\n            {{- end }}\n            - name: zaqar-etc\n              mountPath: /etc/zaqar/api-paste.ini\n              subPath: api-paste.ini\n              readOnly: true\n            - name: zaqar-etc\n              mountPath: /etc/zaqar/policy.yaml\n              subPath: policy.yaml\n              readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal \"path\" \"/etc/mysql/certs\" | include 
\"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.messaging.api.internal \"path\" \"/etc/zaqar/certs\" | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 12 }}\n{{ if $mounts_zaqar_api.volumeMounts }}{{ toYaml $mounts_zaqar_api.volumeMounts | indent 12 }}{{ end }}\n      volumes:\n        - name: pod-tmp\n          emptyDir: {}\n        - name: oslo-lock-path\n          emptyDir: {}\n        - name: pod-etc-zaqar\n          emptyDir: {}\n        - name: zaqar-bin\n          configMap:\n            name: zaqar-bin\n            defaultMode: 0555\n        - name: zaqar-etc\n          secret:\n            secretName: zaqar-etc\n            defaultMode: 0444\n        - name: zaqar-etc-snippets\n          projected:\n            sources:\n              - secret:\n                  name: zaqar-ks-etc\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{- dict \"enabled\" (or .Values.manifests.certificates .Values.tls.identity) \"name\" .Values.secrets.tls.messaging.api.internal | include \"helm-toolkit.snippets.tls_volume\" | indent 8 }}\n{{ if $mounts_zaqar_api.volumes }}{{ toYaml $mounts_zaqar_api.volumes | indent 8 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/extra-manifests.yaml",
    "content": "{{ range .Values.extraObjects }}\n---\n{{ if typeIs \"string\" . }}\n    {{- tpl . $ }}\n{{- else }}\n    {{- tpl (toYaml .) $ }}\n{{- end }}\n{{ end }}\n"
  },
  {
    "path": "zaqar/templates/ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.ingress_api .Values.network.api.ingress.public }}\n{{- $envAll := . }}\n{{- $ingressOpts := dict \"envAll\" $envAll \"backendService\" \"api\" \"backendServiceType\" \"messaging\" \"backendPort\" \"z-api\" -}}\n{{- $secretName := index $envAll.Values.secrets.tls.messaging.api ($envAll.Values.network.api.ingress.classes.namespace | replace \"-\" \"_\") -}}\n{{- if $envAll.Values.tls.identity -}}\n{{- $_ := set $ingressOpts \"certIssuer\" $envAll.Values.endpoints.identity.auth.zaqar.tls.ca -}}\n{{- end -}}\n{{- if hasKey $envAll.Values.secrets.tls.messaging.api $envAll.Values.network.api.ingress.classes.namespace -}}\n{{- $_ := set $ingressOpts \"tlsSecret\" $secretName -}}\n{{- end -}}\n{{ $ingressOpts | include \"helm-toolkit.manifests.ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/job-bootstrap.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}\n{{- $bootstrapJob := dict \"envAll\" . \"serviceName\" \"zaqar\" \"keystoneUser\" .Values.bootstrap.ks_user -}}\n{{- if .Values.pod.tolerations.zaqar.enabled -}}\n{{- $_ := set $bootstrapJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $bootstrapJob | include \"helm-toolkit.manifests.job_bootstrap\" }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/job-db-drop.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.job_db_drop }}\n{{- $dbDropJob := dict \"envAll\" . \"serviceName\" \"zaqar\" -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbDropJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- if .Values.pod.tolerations.zaqar.enabled -}}\n{{- $_ := set $dbDropJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbDropJob | include \"helm-toolkit.manifests.job_db_drop_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/job-db-init.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_init\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-5\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_init }}\n{{- $dbToInit := dict \"adminSecret\" .Values.secrets.oslo_db.admin \"configFile\" \"/etc/zaqar/zaqar.conf\" \"logConfigFile\" \"/etc/zaqar/logging.conf\" \"configDbSection\" \"drivers:management_store:sqlalchemy\" \"configDbKey\" \"uri\" -}}\n{{- $dbInitJob := dict \"envAll\" . \"serviceName\" \"zaqar\" \"dbToInit\" $dbToInit -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbInitJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbInitJob \"jobAnnotations\" (include \"metadata.annotations.job.db_init\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.zaqar.enabled -}}\n{{- $_ := set $dbInitJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbInitJob | include \"helm-toolkit.manifests.job_db_init_mysql\" }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/job-db-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.db_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-4\"\n{{- end }}\n\n{{- if .Values.manifests.job_db_sync }}\n{{- $dbSyncJob := dict \"envAll\" . \"serviceName\" \"zaqar\" \"podVolMounts\" .Values.pod.mounts.zaqar_db_sync.zaqar_db_sync.valumeMounts \"podVols\" .Values.pod.mounts.zaqar_db_sync.zaqar_db_sync.volumes \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) -}}\n{{- if .Values.manifests.certificates -}}\n{{- $_ := set $dbSyncJob \"dbAdminTlsSecret\" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}\n{{- end -}}\n{{- $_ := set $dbSyncJob \"jobAnnotations\" (include \"metadata.annotations.job.db_sync\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.zaqar.enabled -}}\n{{- $_ := set $dbSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $dbSyncJob | include \"helm-toolkit.manifests.job_db_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/job-image-repo-sync.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.repo_sync\" }}\nhelm.sh/hook: post-install,post-upgrade\n{{- end }}\n\n{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}\n{{- $imageRepoSyncJob := dict \"envAll\" . \"serviceName\" \"zaqar\" \"jobAnnotations\" (include \"metadata.annotations.job.repo_sync\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.zaqar.enabled -}}\n{{- $_ := set $imageRepoSyncJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $imageRepoSyncJob | include \"helm-toolkit.manifests.job_image_repo_sync\" }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/job-ks-endpoints.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_endpoints\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-2\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_endpoints }}\n{{- $ksEndpointsJob := dict \"envAll\" . \"serviceName\" \"zaqar\" \"serviceTypes\" ( tuple \"messaging\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksEndpointsJob \"tlsSecret\" .Values.secrets.tls.messaging.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksEndpointsJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_endpoints\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.zaqar.enabled -}}\n{{- $_ := set $ksEndpointsJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksEndpointsJob | include \"helm-toolkit.manifests.job_ks_endpoints\" }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/job-ks-service.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_service\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-3\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_service }}\n{{- $ksServiceJob := dict \"envAll\" . \"serviceName\" \"zaqar\" \"serviceTypes\" ( tuple \"messaging\" ) -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksServiceJob \"tlsSecret\" .Values.secrets.tls.messaging.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksServiceJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_service\" . | fromYaml) }}\n{{- if .Values.pod.tolerations.zaqar.enabled -}}\n{{- $_ := set $ksServiceJob \"tolerationsEnabled\" true -}}\n{{- end }}\n{{ $ksServiceJob | include \"helm-toolkit.manifests.job_ks_service\" }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/job-ks-user.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- define \"metadata.annotations.job.ks_user\" }}\nhelm.sh/hook: post-install,post-upgrade\nhelm.sh/hook-weight: \"-1\"\n{{- end }}\n\n{{- if .Values.manifests.job_ks_user }}\n{{- $ksUserJob := dict \"envAll\" . \"serviceName\" \"zaqar\" -}}\n{{- if or .Values.manifests.certificates .Values.tls.identity -}}\n{{- $_ := set $ksUserJob \"tlsSecret\" .Values.secrets.tls.messaging.api.internal -}}\n{{- end -}}\n{{- $_ := set $ksUserJob \"jobAnnotations\" (include \"metadata.annotations.job.ks_user\" . | fromYaml) -}}\n{{- if .Values.pod.tolerations.zaqar.enabled -}}\n{{- $_ := set $ksUserJob \"tolerationsEnabled\" true -}}\n{{- end -}}\n{{ $ksUserJob | include \"helm-toolkit.manifests.job_ks_user\" }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/network_policy.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n{{- if .Values.manifests.network_policy -}}\n{{- $netpol_opts := dict \"envAll\" . \"name\" \"application\" \"label\" \"zaqar\" -}}\n{{ $netpol_opts | include \"helm-toolkit.manifests.kubernetes_network_policy\" }}\n{{- end -}}\n"
  },
  {
    "path": "zaqar/templates/pdb-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pdb_api }}\n{{- $envAll := . }}\n---\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: zaqar-api\n  labels:\n{{ tuple $envAll \"zaqar\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\nspec:\n{{- if .Values.pod.lifecycle.disruption_budget.api.min_available }}\n  minAvailable: {{ .Values.pod.lifecycle.disruption_budget.api.min_available }}\n{{- else }}\n  maxUnavailable: {{ .Values.pod.lifecycle.disruption_budget.api.max_unavailable | default 1 }}\n{{- end }}\n  selector:\n    matchLabels:\n{{ tuple $envAll \"zaqar\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 6 }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/pod-rally-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pod_rally_test }}\n{{- $envAll := . }}\n{{- $dependencies := .Values.dependencies.static.tests }}\n\n{{- $mounts_zaqar_tests := .Values.pod.mounts.zaqar_tests.zaqar_tests }}\n{{- $mounts_zaqar_tests_init := .Values.pod.mounts.zaqar_tests.init_container }}\n\n{{- $serviceAccountName := print .Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{ .Release.Name }}-test\"\n  labels:\n{{ tuple $envAll \"zaqar\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"zaqar-test\" \"containerNames\" (list \"init\" \"zaqar-test\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n{{ tuple \"zaqar_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 2 }}\n{{ tuple \"zaqar_tests\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.zaqar.enabled }}\n{{ tuple $envAll \"zaqar\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  restartPolicy: Never\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_zaqar_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n    - name: zaqar-test-ks-user\n{{ tuple $envAll \"ks_user\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ tuple $envAll $envAll.Values.pod.resources.jobs.ks_user | include \"helm-toolkit.snippets.kubernetes_resources\" | indent 6 }}\n      command:\n        - /tmp/ks-user.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: zaqar-bin\n          mountPath: /tmp/ks-user.sh\n          subPath: ks-user.sh\n          readOnly: true\n{{- dict \"enabled\" .Values.manifests.certificates \"name\" .Values.secrets.tls.messaging.api.internal | include \"helm-toolkit.snippets.tls_volume_mount\" | indent 8 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin \"useCA\" .Values.manifests.certificates }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_SERVICE_NAME\n          value: \"test\"\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.test }}\n{{- include \"helm-toolkit.snippets.keystone_user_create_env_vars\" $env | indent 8 }}\n{{- end }}\n        - name: SERVICE_OS_ROLE\n          value: {{ .Values.endpoints.identity.auth.test.role | quote }}\n  containers:\n    - name: zaqar-test\n{{ 
tuple $envAll \"test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"zaqar_test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n      command:\n        - /tmp/rally-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: zaqar-etc\n          mountPath: /etc/rally/rally_tests.yaml\n          subPath: rally_tests.yaml\n          readOnly: true\n        - name: zaqar-bin\n          mountPath: /tmp/rally-test.sh\n          subPath: rally-test.sh\n          readOnly: true\n{{ if $mounts_zaqar_tests.volumeMounts }}{{ toYaml $mounts_zaqar_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: zaqar-etc\n      secret:\n        secretName: zaqar-etc\n        defaultMode: 0444\n    - name: zaqar-bin\n      configMap:\n        name: zaqar-bin\n        defaultMode: 0555\n{{ if $mounts_zaqar_tests.volumes }}{{ toYaml $mounts_zaqar_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/pod-test.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.pod_test }}\n{{- $envAll := . }}\n{{- $dependencies := .Values.dependencies.static.tests }}\n\n{{- $mounts_zaqar_tests := .Values.pod.mounts.zaqar_tests.zaqar_tests }}\n{{- $mounts_zaqar_tests_init := .Values.pod.mounts.zaqar_tests.init_container }}\n\n{{- $serviceAccountName := print .Release.Name \"-test\" }}\n{{ tuple $envAll \"tests\" $serviceAccountName | include \"helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount\" }}\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{ .Release.Name }}-test\"\n  labels:\n{{ tuple $envAll \"zaqar\" \"test\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  annotations:\n    \"helm.sh/hook\": test-success\n    {{ tuple $envAll | include \"helm-toolkit.snippets.release_uuid\" }}\n{{ dict \"envAll\" $envAll \"podName\" \"zaqar-test\" \"containerNames\" (list \"init\" \"zaqar-test\") | include \"helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation\" | indent 4 }}\nspec:\n{{ tuple \"zaqar_tests\" . | include \"helm-toolkit.snippets.kubernetes_pod_priority_class\" | indent 2 }}\n{{ tuple \"zaqar_tests\" . 
| include \"helm-toolkit.snippets.kubernetes_pod_runtime_class\" | indent 2 }}\n  serviceAccountName: {{ $serviceAccountName }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" | include \"helm-toolkit.snippets.kubernetes_pod_security_context\" | indent 2 }}\n  nodeSelector:\n    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}\n{{ if $envAll.Values.pod.tolerations.zaqar.enabled }}\n{{ tuple $envAll \"zaqar\" | include \"helm-toolkit.snippets.kubernetes_tolerations\" | indent 2 }}\n{{ end }}\n  restartPolicy: Never\n  initContainers:\n{{ tuple $envAll \"tests\" $mounts_zaqar_tests_init | include \"helm-toolkit.snippets.kubernetes_entrypoint_init_container\" | indent 4 }}\n  containers:\n    - name: zaqar-test\n{{ tuple $envAll \"scripted_test\" | include \"helm-toolkit.snippets.image\" | indent 6 }}\n{{ dict \"envAll\" $envAll \"application\" \"test\" \"container\" \"zaqar_test\" | include \"helm-toolkit.snippets.kubernetes_container_security_context\" | indent 6 }}\n      env:\n{{- with $env := dict \"ksUserSecret\" .Values.secrets.identity.admin }}\n{{- include \"helm-toolkit.snippets.keystone_openrc_env_vars\" $env | indent 8 }}\n{{- end }}\n      command:\n        - /tmp/zaqar-test.sh\n      volumeMounts:\n        - name: pod-tmp\n          mountPath: /tmp\n        - name: zaqar-bin\n          mountPath: /tmp/zaqar-test.sh\n          subPath: zaqar-test.sh\n          readOnly: true\n{{ if $mounts_zaqar_tests.volumeMounts }}{{ toYaml $mounts_zaqar_tests.volumeMounts | indent 8 }}{{ end }}\n  volumes:\n    - name: pod-tmp\n      emptyDir: {}\n    - name: zaqar-bin\n      configMap:\n        name: zaqar-bin\n        defaultMode: 0555\n{{ if $mounts_zaqar_tests.volumes }}{{ toYaml $mounts_zaqar_tests.volumes | indent 4 }}{{ end }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/secret-db.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_db }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"zaqar\" }}\n{{- $secretName := index $envAll.Values.secrets.oslo_db $userClass }}\n{{- $connection := tuple \"oslo_db\" \"internal\" $userClass \"mysql\" $envAll | include \"helm-toolkit.endpoints.authenticated_endpoint_uri_lookup\" }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"oslo_db\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- if $envAll.Values.manifests.certificates }}\n  DB_CONNECTION: {{ (printf \"%s?charset=utf8&ssl_ca=/etc/mysql/certs/ca.crt&ssl_key=/etc/mysql/certs/tls.key&ssl_cert=/etc/mysql/certs/tls.crt&ssl_verify_cert\" $connection ) | b64enc -}}\n{{- else }}\n  DB_CONNECTION: {{  $connection | b64enc -}}\n{{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/secret-ingress-tls.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ingress_tls }}\n{{- include \"helm-toolkit.manifests.secret_ingress_tls\" ( dict \"envAll\" . \"backendServiceType\" \"messaging\" ) }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/secret-keystone.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_keystone }}\n{{- $envAll := . }}\n{{- range $key1, $userClass := tuple \"admin\" \"zaqar\" \"test\" }}\n{{- $secretName := index $envAll.Values.secrets.identity $userClass }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: {{ $secretName }}\n  annotations:\n{{ tuple \"identity\" $userClass $envAll | include \"helm-toolkit.snippets.custom_secret_annotations\" | indent 4 }}\ntype: Opaque\ndata:\n{{- tuple $userClass \"internal\" $envAll | include \"helm-toolkit.snippets.keystone_secret_openrc\" | indent 2 -}}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/secret-ks-etc.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.secret_ks_etc }}\n{{- $envAll := . -}}\n{{/* The endpoints.identity.auth sections with the oslo config sections they get rendered to */}}\n{{- $ksUsers := dict\n  \"zaqar\" \"keystone_authtoken\"\n-}}\n{{ dict\n  \"envAll\" $envAll\n  \"serviceName\" \"zaqar\"\n  \"serviceUserSections\" $ksUsers\n  | include \"helm-toolkit.manifests.secret_ks_etc\"\n}}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/secret-registry.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}\n{{ include \"helm-toolkit.manifests.secret_registry\" ( dict \"envAll\" . \"registryUser\" .Chart.Name ) }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/service-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if .Values.manifests.service_api }}\n{{- $envAll := . }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: {{ tuple \"messaging\" \"internal\" . | include \"helm-toolkit.endpoints.hostname_short_endpoint_lookup\" }}\nspec:\n  ports:\n    - name: z-api\n      port: {{ tuple \"messaging\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\n      {{- if .Values.network.api.node_port.enabled }}\n      nodePort: {{ .Values.network.api.node_port.port }}\n      {{- end }}\n  selector:\n{{ tuple $envAll \"zaqar\" \"api\" | include \"helm-toolkit.snippets.kubernetes_metadata_labels\" | indent 4 }}\n  {{- if .Values.network.api.node_port.enabled }}\n  type: NodePort\n  {{- if .Values.network.api.external_policy_local }}\n  externalTrafficPolicy: Local\n  {{- end }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/templates/service-ingress-api.yaml",
    "content": "{{/*\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/}}\n\n{{- if and .Values.manifests.service_ingress_api .Values.network.api.ingress.public }}\n{{- $serviceIngressOpts := dict \"envAll\" . \"backendServiceType\" \"messaging\" -}}\n{{ $serviceIngressOpts | include \"helm-toolkit.manifests.service_ingress\" }}\n{{- end }}\n"
  },
  {
    "path": "zaqar/values.yaml",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\nlabels:\n  api:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  job:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n  test:\n    node_selector_key: openstack-control-plane\n    node_selector_value: enabled\n\nrelease_group: null\n\nimages:\n  tags:\n    test: docker.io/xrally/xrally-openstack:2.0.0\n    bootstrap: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    db_drop: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_user: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_service: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    ks_endpoints: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy\n    zaqar_db_sync: quay.io/airshipit/zaqar:2025.1-ubuntu_jammy\n    zaqar_api: quay.io/airshipit/zaqar:2025.1-ubuntu_jammy\n    dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy\n    image_repo_sync: docker.io/docker:17.07.0\n  pull_policy: \"IfNotPresent\"\n  local_registry:\n    active: false\n    exclude:\n      - dep_check\n      - image_repo_sync\n\nnetwork:\n  api:\n    ingress:\n      public: true\n      classes:\n        namespace: \"ingress-openstack\"\n        cluster: \"ingress-cluster\"\n      annotations:\n        nginx.ingress.kubernetes.io/rewrite-target: /\n        
haproxy.org/path-rewrite: /\n    external_policy_local: false\n    node_port:\n      enabled: false\n      port: 30780\n\ndependencies:\n  dynamic:\n    common:\n      local_image_registry:\n        jobs:\n          - zaqar-image-repo-sync\n        services:\n          - endpoint: node\n            service: local_image_registry\n  static:\n    api:\n      jobs:\n        - zaqar-db-sync\n        - zaqar-ks-user\n        - zaqar-ks-endpoints\n      services:\n        - endpoint: internal\n          service: oslo_cache\n        - endpoint: internal\n          service: oslo_db\n        - endpoint: internal\n          service: identity\n    bootstrap:\n      services:\n        - endpoint: internal\n          service: identity\n        - endpoint: internal\n          service: messaging\n    db_init:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_drop:\n      services:\n        - endpoint: internal\n          service: oslo_db\n    db_sync:\n      jobs:\n        - zaqar-db-init\n      services:\n        - endpoint: internal\n          service: oslo_db\n    ks_endpoints:\n      jobs:\n        - zaqar-ks-service\n      services:\n        - endpoint: internal\n          service: identity\n    ks_service:\n      services:\n        - endpoint: internal\n          service: identity\n    ks_user:\n      services:\n        - endpoint: internal\n          service: identity\n\nsecrets:\n  identity:\n    admin: zaqar-keystone-admin\n    zaqar: zaqar-keystone-user\n    service: zaqar-keystone-service\n    test: zaqar-keystone-test\n  oslo_db:\n    admin: zaqar-db-admin\n    zaqar: zaqar-db-user\n  tls:\n    messaging:\n      api:\n        admin: zaqar-tls-admin\n        public: zaqar-tls-public\n        internal: zaqar-tls-internal\n        nginx: zaqar-tls-nginx\n        nginx_cluster: zaqar-tls-nginx-cluster\n\nendpoints:\n  cluster_domain_suffix: cluster.local\n  local_image_registry:\n    name: docker-registry\n    namespace: docker-registry\n   
 hosts:\n      default: localhost\n      internal: docker-registry\n      node: localhost\n    host_fqdn_override:\n      default: null\n    port:\n      registry:\n        node: 5000\n  identity:\n    name: keystone\n    auth:\n      admin:\n        region_name: RegionOne\n        username: admin\n        password: password\n        project_name: admin\n        user_domain_name: default\n        project_domain_name: default\n      zaqar:\n        role: admin,service\n        region_name: RegionOne\n        username: zaqar\n        password: password\n        project_name: service\n        user_domain_name: service\n        project_domain_name: service\n      test:\n        role: admin\n        region_name: RegionOne\n        username: zaqar-test\n        password: password\n        project_name: test\n        user_domain_name: service\n        project_domain_name: service\n    hosts:\n      default: keystone\n      internal: keystone-api\n    host_fqdn_override:\n      default: null\n    path:\n      default: /v3\n    scheme:\n      default: http\n    port:\n      api:\n        default: 80\n        internal: 5000\n  messaging:\n    name: zaqar\n    hosts:\n      default: zaqar-api\n      public: zaqar\n    host_fqdn_override:\n      default: null\n    path:\n      default: /\n    scheme:\n      default: \"http\"\n      service: \"http\"\n    port:\n      api:\n        default: 8888\n        public: 80\n        service: 8888\n  oslo_db:\n    auth:\n      admin:\n        username: root\n        password: password\n        secret:\n          tls:\n            internal: mariadb-tls-direct\n      zaqar:\n        username: zaqar\n        password: password\n    hosts:\n      default: mariadb\n    host_fqdn_override:\n      default: null\n    path: /zaqar\n    scheme: mysql+pymysql\n    port:\n      mysql:\n        default: 3306\n  oslo_cache:\n    auth:\n      memcache_secret_key: null\n    hosts:\n      default: memcached\n    host_fqdn_override:\n      default: null\n 
   port:\n      memcache:\n        default: 11211\n  fluentd:\n    namespace: null\n    name: fluentd\n    hosts:\n      default: fluentd-logging\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: \"http\"\n    port:\n      service:\n        default: 24224\n      metrics:\n        default: 24220\n  kube_dns:\n    namespace: kube-system\n    name: kubernetes-dns\n    hosts:\n      default: kube-dns\n    host_fqdn_override:\n      default: null\n    path:\n      default: null\n    scheme: http\n    port:\n      dns:\n        default: 53\n        protocol: UDP\n  ingress:\n    namespace: null\n    name: ingress\n    hosts:\n      default: ingress\n    port:\n      ingress:\n        default: 80\n\npod:\n  probes:\n    rpc_timeout: 60\n    rpc_retries: 2\n    api:\n      default:\n        liveness:\n          enabled: true\n          params:\n            initialDelaySeconds: 60\n            periodSeconds: 10\n            timeoutSeconds: 5\n        readiness:\n          enabled: true\n          params:\n            initialDelaySeconds: 60\n            periodSeconds: 10\n            timeoutSeconds: 5\n  security_context:\n    zaqar:\n      pod:\n        runAsUser: 42424\n      container:\n        zaqar_api:\n          runAsUser: 0\n    test:\n      pod:\n        runAsUser: 42424\n      container:\n        zaqar_test_ks_user:\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n        zaqar_test:\n          runAsUser: 65500\n          readOnlyRootFilesystem: true\n          allowPrivilegeEscalation: false\n  affinity:\n    anti:\n      type:\n        default: preferredDuringSchedulingIgnoredDuringExecution\n      topologyKey:\n        default: kubernetes.io/hostname\n      weight:\n        default: 10\n  tolerations:\n    zaqar:\n      enabled: false\n      tolerations:\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n        - key: 
node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule\n  mounts:\n    zaqar_api:\n      init_container: null\n      zaqar_api:\n        volumeMounts:\n        volumes:\n    zaqar_bootstrap:\n      init_container: null\n      zaqar_bootstrap:\n        volumeMounts:\n        volumes:\n    zaqar_db_sync:\n      zaqar_db_sync:\n        volumeMounts:\n        volumes:\n    zaqar_tests:\n      init_container: null\n      zaqar_tests:\n        volumeMounts:\n        volumes:\n  replicas:\n    api: 1\n  lifecycle:\n    upgrades:\n      deployments:\n        revision_history: 3\n        pod_replacement_strategy: RollingUpdate\n        rolling_update:\n          max_unavailable: 1\n          max_surge: 3\n    disruption_budget:\n      api:\n        min_available: 0\n    termination_grace_period:\n      api:\n        timeout: 30\n  resources:\n    enabled: false\n    api:\n      requests:\n        memory: \"128Mi\"\n        cpu: \"100m\"\n      limits:\n        memory: \"1024Mi\"\n        cpu: \"2000m\"\n    jobs:\n      bootstrap:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_init:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      db_drop:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_endpoints:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_service:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          
memory: \"1024Mi\"\n          cpu: \"2000m\"\n      ks_user:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      tests:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n      image_repo_sync:\n        requests:\n          memory: \"128Mi\"\n          cpu: \"100m\"\n        limits:\n          memory: \"1024Mi\"\n          cpu: \"2000m\"\n\njobs:\n  bootstrap:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  db_init:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  db_drop:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  db_sync:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  ks_endpoints:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  ks_service:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  ks_user:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  tests:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n  image_repo_sync:\n    backoffLimit: 5\n    activeDeadlineSeconds: 600\n\nconf:\n  zaqar:\n    DEFAULT:\n      log_config_append: /etc/zaqar/logging.conf\n    drivers:\n      transport: wsgi\n      message_store: redis\n      management_store: sqlalchemy\n    keystone_authtoken:\n      service_token_roles: service\n      service_token_roles_required: true\n      auth_type: password\n      auth_version: v3\n      memcache_security_strategy: ENCRYPT\n      service_type: messaging\n    cache:\n      backend: dogpile.cache.memory\n    drivers:management_store:sqlalchemy:\n      uri:\n    drivers:message_store:redis:\n      uri: redis://redis:6379\n    drivers:transport:wsgi:\n      bind: 0.0.0.0\n      port: 8888\n    signed_url:\n      secret_key: SOMELONGSECRETKEY\n    oslo_concurrency:\n      lock_path: /var/lock\n  logging:\n    loggers:\n      keys:\n        - root\n        - zaqar\n    handlers:\n      
keys:\n        - stdout\n        - stderr\n        - \"null\"\n    formatters:\n      keys:\n        - context\n        - default\n    logger_root:\n      level: WARNING\n      handlers: \"null\"\n    logger_zaqar:\n      level: INFO\n      handlers:\n        - stdout\n      qualname: zaqar\n    logger_amqp:\n      level: WARNING\n      handlers: stderr\n      qualname: amqp\n    logger_amqplib:\n      level: WARNING\n      handlers: stderr\n      qualname: amqplib\n    logger_eventletwsgi:\n      level: WARNING\n      handlers: stderr\n      qualname: eventlet.wsgi.server\n    logger_sqlalchemy:\n      level: WARNING\n      handlers: stderr\n      qualname: sqlalchemy\n    logger_boto:\n      level: WARNING\n      handlers: stderr\n      qualname: boto\n    handler_null:\n      class: logging.NullHandler\n      formatter: default\n      args: ()\n    handler_stdout:\n      class: StreamHandler\n      args: (sys.stdout,)\n      formatter: context\n    handler_stderr:\n      class: StreamHandler\n      args: (sys.stderr,)\n      formatter: context\n    formatter_context:\n      class: oslo_log.formatters.ContextFormatter\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n    formatter_default:\n      format: \"%(message)s\"\n      datefmt: \"%Y-%m-%d %H:%M:%S\"\n  api_paste:\n    composite:messaging:\n      use: \"egg:Paste#urlmap\"\n      \"/\": zaqarversions\n      \"/v1\": zaqarapi_v1\n    composite:zaqarapi_v1:\n      use: \"call:zaqar.transport.wsgi:pipeline_factory\"\n      noauth: \"request_id faultwrap sizelimit noauth zaqarapi_v1\"\n      keystone: \"request_id faultwrap sizelimit authtoken keystonecontext zaqarapi_v1\"\n    app:zaqarversions:\n      paste.app_factory: \"zaqar.api.versions:Versions.factory\"\n    app:zaqarapi_v1:\n      paste.app_factory: \"zaqar.api.v1.app:make_app\"\n    filter:request_id:\n      paste.filter_factory: \"oslo_middleware:RequestId.factory\"\n    filter:faultwrap:\n      paste.filter_factory: 
\"zaqar.api.middleware:FaultWrapper.factory\"\n    filter:noauth:\n      paste.filter_factory: \"zaqar.api.middleware:NoAuthMiddleware.factory\"\n    filter:sizelimit:\n      paste.filter_factory: \"oslo_middleware:RequestBodySizeLimiter.factory\"\n    filter:authtoken:\n      paste.filter_factory: \"keystonemiddleware.auth_token:filter_factory\"\n    filter:keystonecontext:\n      paste.filter_factory: \"zaqar.api.middleware:KeystoneContextMiddleware.factory\"\n  policy: {}\n  rally_tests:\n    run_tempest: false\n    clean_up: \"\"\n    tests:\n      Zaqar.queues:\n        - args:\n            queue_name: \"test-queue\"\n            messages:\n              - \"message1\"\n              - \"message2\"\n              - \"message3\"\n            ttl: 3600\n          runner:\n            type: \"constant\"\n            times: 10\n            concurrency: 3\n          sla:\n            failure_rate:\n              max: 0\n      Zaqar.publish:\n        - args:\n            queue_name: \"publish-queue\"\n            messages:\n              - \"hello\"\n              - \"world\"\n          runner:\n            type: \"constant\"\n            times: 15\n            concurrency: 4\n          sla:\n            failure_rate:\n              max: 0\n      Zaqar.consume:\n        - args:\n            queue_name: \"publish-queue\"\n            max_messages: 5\n          runner:\n            type: \"constant\"\n            times: 10\n            concurrency: 2\n          sla:\n            failure_rate:\n              max: 0\n\nbootstrap:\n  enabled: false\n  ks_user: zaqar\n  script: |\n    openstack token issue\n\n    # create a test queue in Zaqar\n    openstack queue create test-queue || echo \"Queue already exists\"\n\nmanifests:\n  certificates: false\n  configmap_bin: true\n  configmap_etc: true\n  deployment_api: true\n  ingress_api: true\n  job_bootstrap: true\n  job_db_init: true\n  job_db_drop: false\n  job_db_sync: true\n  job_image_repo_sync: true\n  
job_ks_endpoints: true\n  job_ks_service: true\n  job_ks_user: true\n  pdb_api: true\n  pod_rally_test: true\n  secret_db: true\n  secret_keystone: true\n  secret_ks_etc: true\n  service_api: true\n  service_ingress_api: true\n\nnetwork_policy:\n  zaqar:\n    ingress:\n      - {}\n    egress:\n      - {}\n\ntls:\n  identity: false\n  oslo_db: false\n  messaging:\n    api:\n      public: false\n# -- Array of extra K8s manifests to deploy\n## Note: Supports use of custom Helm templates\nextraObjects: []\n  # - apiVersion: secrets-store.csi.x-k8s.io/v1\n  #   kind: SecretProviderClass\n  #   metadata:\n  #     name: osh-secrets-store\n  #   spec:\n  #     provider: aws\n  #     parameters:\n  #       objects: |\n  #         - objectName: \"osh\"\n  #           objectType: \"secretsmanager\"\n  #           jmesPath:\n  #               - path: \"client_id\"\n  #                 objectAlias: \"client_id\"\n  #               - path: \"client_secret\"\n  #                 objectAlias: \"client_secret\"\n  #     secretObjects:\n  #     - data:\n  #       - key: client_id\n  #         objectName: client_id\n  #       - key: client_secret\n  #         objectName: client_secret\n  #       secretName: osh-secrets-store\n  #       type: Opaque\n  #       labels:\n  #         app.kubernetes.io/part-of: osh\n...\n"
  },
  {
    "path": "zuul.d/2024.2.yaml",
    "content": "---\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n- job:\n    name: openstack-helm-cinder-2024-2-ubuntu_jammy\n    parent: openstack-helm-cinder-rook\n    vars:\n      osh_params:\n        openstack_release: \"2024.2\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-trove-2024-2-ubuntu_jammy\n    parent: openstack-helm-trove\n    nodeset: openstack-helm-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2024.2\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-compute-kit-2024-2-ubuntu_jammy\n    parent: openstack-helm-compute-kit\n    nodeset: openstack-helm-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2024.2\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-compute-kit-cilium-2024-2-ubuntu_jammy\n    parent: openstack-helm-compute-kit\n    nodeset: openstack-helm-1node-3nodes-ubuntu_jammy\n    vars:\n      calico_setup: false\n      cilium_setup: true\n      osh_params:\n        openstack_release: \"2024.2\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: 
openstack-helm-compute-kit-flannel-2024-2-ubuntu_jammy\n    parent: openstack-helm-compute-kit\n    nodeset: openstack-helm-1node-3nodes-ubuntu_jammy\n    vars:\n      calico_setup: false\n      flannel_setup: true\n      osh_params:\n        openstack_release: \"2024.2\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-compute-kit-helm-repo-public-2024-2-ubuntu_jammy\n    parent: openstack-helm-compute-kit-helm-repo-public\n    nodeset: openstack-helm-1node-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2024.2\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-compute-kit-ovn-2024-2-ubuntu_jammy\n    parent: openstack-helm-compute-kit-ovn\n    nodeset: openstack-helm-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2024.2\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: ovn,gateway\n\n- job:\n    name: openstack-helm-tacker-2024-2-ubuntu_jammy\n    parent: openstack-helm-tacker\n    nodeset: openstack-helm-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2024.2\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n...\n"
  },
  {
    "path": "zuul.d/2025.1.yaml",
    "content": "---\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n- job:\n    name: openstack-helm-cinder-2025-1-ubuntu_jammy\n    parent: openstack-helm-cinder-rook\n    nodeset: openstack-helm-5nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-trove-2025-1-ubuntu_jammy\n    parent: openstack-helm-trove\n    nodeset: openstack-helm-5nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-compute-kit-2025-1-ubuntu_jammy\n    parent: openstack-helm-compute-kit\n    nodeset: openstack-helm-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-compute-kit-ovn-2025-1-ubuntu_jammy\n    parent: openstack-helm-compute-kit-ovn\n    nodeset: openstack-helm-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: ovn,gateway\n\n- job:\n    name: openstack-helm-skyline-2025-1-ubuntu_jammy\n    
parent: openstack-helm-skyline\n    nodeset: openstack-helm-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-cinder-2025-1-ubuntu_noble\n    parent: openstack-helm-cinder-rook\n    nodeset: openstack-helm-5nodes-ubuntu_noble\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: noble\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-cinder-2025-1-ubuntu_noble_loci\n    parent: openstack-helm-cinder-rook\n    nodeset: openstack-helm-5nodes-ubuntu_noble\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: noble\n        feature_gates: loci,gateway\n\n- job:\n    name: openstack-helm-trove-2025-1-ubuntu_noble\n    parent: openstack-helm-trove\n    nodeset: openstack-helm-5nodes-ubuntu_noble\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: noble\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-compute-kit-2025-1-ubuntu_noble\n    parent: openstack-helm-compute-kit\n    nodeset: openstack-helm-3nodes-ubuntu_noble\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: noble\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-compute-kit-2025-1-ubuntu_noble_loci\n    parent: openstack-helm-compute-kit\n    nodeset: openstack-helm-3nodes-ubuntu_noble\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: noble\n        feature_gates: loci,gateway\n\n- job:\n    name: 
openstack-helm-octavia-2025-1-ubuntu_jammy\n    parent: openstack-helm-octavia\n    nodeset: openstack-helm-4nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-watcher-2025-1-ubuntu_jammy\n    parent: openstack-helm-watcher\n    nodeset: openstack-helm-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-blazar-2025-1-ubuntu_jammy\n    parent: openstack-helm-blazar\n    nodeset: openstack-helm-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-cloudkitty-2025-1-ubuntu_jammy\n    parent: openstack-helm-cloudkitty\n    nodeset: openstack-helm-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-freezer-2025-1-ubuntu_jammy\n    parent: openstack-helm-freezer\n    nodeset: openstack-helm-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-zaqar-2025-1-ubuntu_jammy\n    parent: openstack-helm-zaqar\n    nodeset: openstack-helm-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: 
openstack-helm-compute-kit-dpdk-2025-1-ubuntu_jammy\n    description: |\n      Run the openstack-helm compute-kit job with DPDK enabled.\n      We use single node environment to run this job which means\n      that the job only tests that QEMU and OVS-DPDK are working\n      together. The job does not assume having specific DPDK hardware.\n    parent: openstack-helm-compute-kit\n    pre-run:\n      - playbooks/enable-hugepages.yaml\n      - playbooks/prepare-hosts.yaml\n    nodeset: openstack-helm-1node-32GB-ubuntu_jammy\n    vars:\n      hugepages:\n        enabled: true\n        size: \"2M\"\n        number: 2048\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: dpdk,gateway\n    files:\n      - ^roles/.*\n      - ^openvswitch/.*\n      - ^nova/.*\n      - ^neutron/.*\n\n- job:\n    name: openstack-helm-horizon-2025-1-ubuntu_jammy\n    parent: openstack-helm-horizon\n    nodeset: openstack-helm-1node-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: logo,gateway\n\n- job:\n    name: openstack-helm-compute-kit-cilium-2025-1-ubuntu_jammy\n    parent: openstack-helm-compute-kit\n    nodeset: openstack-helm-1node-3nodes-ubuntu_jammy\n    vars:\n      calico_setup: false\n      cilium_setup: true\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-compute-kit-helm-repo-public-2025-1-ubuntu_jammy\n    parent: openstack-helm-compute-kit-helm-repo-public\n    nodeset: openstack-helm-1node-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        
feature_gates: gateway\n\n- job:\n    name: openstack-helm-tacker-2025-1-ubuntu_jammy\n    parent: openstack-helm-tacker\n    nodeset: openstack-helm-3nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n...\n"
  },
  {
    "path": "zuul.d/2025.2-ubuntu_noble.yaml",
    "content": "---\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n- job:\n    name: openstack-helm-cinder-2025-2-ubuntu_noble\n    parent: openstack-helm-cinder-rook\n    nodeset: openstack-helm-5nodes-ubuntu_noble\n    vars:\n      osh_params:\n        openstack_release: \"2025.2\"\n        container_distro_name: ubuntu\n        container_distro_version: noble\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-cinder-2025-2-ubuntu_noble_loci\n    parent: openstack-helm-cinder-rook\n    nodeset: openstack-helm-5nodes-ubuntu_noble\n    vars:\n      osh_params:\n        openstack_release: \"2025.2\"\n        container_distro_name: ubuntu\n        container_distro_version: noble\n        feature_gates: loci,gateway\n\n- job:\n    name: openstack-helm-trove-2025-2-ubuntu_noble\n    parent: openstack-helm-trove\n    nodeset: openstack-helm-5nodes-ubuntu_noble\n    vars:\n      osh_params:\n        openstack_release: \"2025.2\"\n        container_distro_name: ubuntu\n        container_distro_version: noble\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-compute-kit-2025-2-ubuntu_noble\n    parent: openstack-helm-compute-kit\n    nodeset: openstack-helm-3nodes-ubuntu_noble\n    vars:\n      osh_params:\n        openstack_release: \"2025.2\"\n        container_distro_name: ubuntu\n        container_distro_version: noble\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-compute-kit-2025-2-ubuntu_noble_loci\n    
parent: openstack-helm-compute-kit\n    nodeset: openstack-helm-3nodes-ubuntu_noble\n    vars:\n      osh_params:\n        openstack_release: \"2025.2\"\n        container_distro_name: ubuntu\n        container_distro_version: noble\n        feature_gates: loci,gateway\n\n- job:\n    name: openstack-helm-compute-kit-ovn-2025-2-ubuntu_noble\n    parent: openstack-helm-compute-kit-ovn\n    nodeset: openstack-helm-3nodes-ubuntu_noble\n    vars:\n      osh_params:\n        openstack_release: \"2025.2\"\n        container_distro_name: ubuntu\n        container_distro_version: noble\n        feature_gates: ovn,gateway\n\n- job:\n    name: openstack-helm-octavia-2025-2-ubuntu_noble\n    parent: openstack-helm-octavia\n    nodeset: openstack-helm-4nodes-ubuntu_noble\n    vars:\n      osh_params:\n        openstack_release: \"2025.2\"\n        container_distro_name: ubuntu\n        container_distro_version: noble\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-skyline-2025-2-ubuntu_noble\n    parent: openstack-helm-skyline\n    nodeset: openstack-helm-3nodes-ubuntu_noble\n    vars:\n      osh_params:\n        openstack_release: \"2025.2\"\n        container_distro_name: ubuntu\n        container_distro_version: noble\n        feature_gates: gateway\n\n- job:\n    name: openstack-helm-swift-2025-2-ubuntu_noble\n    parent: openstack-helm-swift\n    nodeset: openstack-helm-3nodes-ubuntu_noble\n    vars:\n      osh_params:\n        openstack_release: \"2025.2\"\n        container_distro_name: ubuntu\n        container_distro_version: noble\n        feature_gates: gateway\n...\n"
  },
  {
    "path": "zuul.d/base.yaml",
    "content": "---\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n- job:\n    name: openstack-helm-linter\n    run: playbooks/lint.yaml\n    nodeset: openstack-helm-1node-ubuntu_noble\n    required-projects:\n      - openstack/openstack-helm\n    irrelevant-files:\n      - ^.*\\.rst$\n      - ^releasenotes/.*$\n    pre-run:\n      - playbooks/inject-keys.yaml\n\n- job:\n    name: openstack-helm-pre-commit\n    parent: tox-linters\n\n- job:\n    name: openstack-helm-bandit\n    nodeset: openstack-helm-1node-ubuntu_noble\n    roles:\n      - zuul: openstack/openstack-helm\n      - zuul: zuul/zuul-jobs\n    required-projects:\n      - openstack/openstack-helm\n    files:\n      - ^.*\\.py\\.tpl$\n      - ^.*\\.py$\n      - ^playbooks/osh-bandit.yaml$\n    pre-run: playbooks/prepare-hosts.yaml\n    post-run: playbooks/collect-logs.yaml\n    run: playbooks/osh-bandit.yaml\n    vars:\n      bandit_version: \"1.7.1\"\n\n- job:\n    name: openstack-helm-publish-charts\n    parent: publish-openstack-artifacts\n    run: playbooks/build-chart.yaml\n    required-projects:\n      - openstack/openstack-helm\n    post-run: playbooks/publish/post.yaml\n    vars:\n      base_version: \"2025.2.0\"\n\n- job:\n    name: openstack-helm-deploy\n    abstract: true\n    roles:\n      - zuul: openstack/openstack-helm\n      - zuul: zuul/zuul-jobs\n    required-projects:\n      - openstack/openstack-helm\n      - openstack/openstack-helm-plugin\n    irrelevant-files:\n      - 
^.*\\.rst$\n      - ^.*\\.rst.gotmpl$\n      - ^doc/.*$\n      - ^releasenotes/.*$\n    timeout: 10800\n    pre-run:\n      - playbooks/prepare-hosts.yaml\n      - playbooks/mount-volumes.yaml\n      - playbooks/inject-keys.yaml\n    post-run: playbooks/collect-logs.yaml\n    run:\n      - playbooks/deploy-env.yaml\n      - playbooks/run-scripts.yaml\n    vars:\n      osh_params:\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n      osh_values_overrides_path: \"../openstack-helm/values_overrides\"\n      gate_scripts_relative_path: \"../openstack-helm\"\n      overlay_network_setup: true\n      extra_volume:\n        size: 80G\n        type: Linux\n        mount_point: /opt/ext_vol\n      docker:\n        root_path: \"/opt/ext_vol/docker\"\n      containerd:\n        root_path: \"/opt/ext_vol/containerd\"\n      kubeadm:\n        pod_network_cidr: \"10.244.0.0/16\"\n        service_cidr: \"10.96.0.0/16\"\n      osh_plugin_repo: \"{{ zuul.project.src_dir }}/../openstack-helm-plugin\"\n      loopback_setup: true\n      loopback_device: /dev/loop100\n      loopback_image: \"/opt/ext_vol/openstack-helm/ceph-loop.img\"\n      ceph_osd_data_device: /dev/loop100\n      kube_version_repo: \"v1.35\"\n      kube_version: \"1.35.0-1.1\"\n      calico_setup: true\n      calico_version: \"v3.31.3\"\n      cilium_setup: false\n      cilium_version: \"1.17.4\"\n      flannel_setup: false\n      flannel_version: v0.26.7\n      metallb_setup: true\n      metallb_version: \"0.15.3\"\n      coredns_resolver_setup: false\n      ingress_setup: false\n      ingress_implementation: \"haproxy\"\n      gatewayapi_setup: true\n      gatewayapi_implementation: \"envoy\"\n      gatewayapi_envoy_version: \"v1.7.0\"\n      crictl_version: \"v1.35.0\"\n      run_helm_tests: \"no\"\n      floating_network_setup: true\n\n- job:\n    name: openstack-helm-deploy-kubespray\n    parent: openstack-helm-deploy\n    abstract: true\n    # NOTE(kozhukalov): Temporarily 
disable voting for this job\n    # due to image pull rate limits on Docker Hub.\n    voting: false\n    run:\n      - playbooks/deploy-env-kubespray.yaml\n      - playbooks/run-scripts.yaml\n    vars:\n      kube_version_kubespray: \"v1.29.5\"\n\n- job:\n    name: openstack-helm-compute-kit\n    parent: openstack-helm-deploy\n    abstract: true\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - export VOLUME_HELM_ARGS=\"--set volume.enabled=false\"; ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/db/mariadb.sh\n        - >-\n          export OSH_EXTRA_HELM_ARGS=\"--values ../openstack-helm/values_overrides/memcached/exporter.yaml\";\n          ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/heat/heat.sh\n        - export GLANCE_BACKEND=local; ./tools/deployment/component/glance/glance.sh\n        - >-\n          export OSH_EXTRA_HELM_ARGS=\"--values ../openstack-helm/values_overrides/openvswitch/exporter.yaml\";\n          ./tools/deployment/component/compute-kit/openvswitch.sh\n        - >-\n          export OSH_EXTRA_HELM_ARGS=\"--values ../openstack-helm/values_overrides/libvirt/inovex_exporter.yaml\";\n          ./tools/deployment/component/compute-kit/libvirt.sh\n        - ./tools/deployment/component/compute-kit/compute-kit.sh\n        - ./tools/deployment/monitoring/openstack-exporter.sh\n        - export OSH_TEST_TIMEOUT=1200; ./tools/deployment/common/run-helm-tests.sh neutron\n        - ./tools/deployment/common/run-helm-tests.sh nova\n        - ./tools/deployment/common/run-helm-tests.sh glance\n        - ./tools/deployment/common/run-helm-tests.sh keystone\n        - ./tools/deployment/common/use-it.sh\n        - 
./tools/deployment/common/force-cronjob-run.sh\n\n- job:\n    name: openstack-helm-compute-kit-kubespray\n    parent: openstack-helm-deploy-kubespray\n    abstract: true\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - export VOLUME_HELM_ARGS=\"--set volume.enabled=false\"; ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/heat/heat.sh\n        - export GLANCE_BACKEND=local; ./tools/deployment/component/glance/glance.sh\n        - ./tools/deployment/component/compute-kit/openvswitch.sh\n        - ./tools/deployment/component/compute-kit/libvirt.sh\n        - ./tools/deployment/component/compute-kit/compute-kit.sh\n        - ./tools/deployment/monitoring/openstack-exporter.sh\n        - export OSH_TEST_TIMEOUT=1200;./tools/deployment/common/run-helm-tests.sh neutron\n        - ./tools/deployment/common/run-helm-tests.sh nova\n        - ./tools/deployment/common/run-helm-tests.sh glance\n        - ./tools/deployment/common/run-helm-tests.sh keystone\n        - ./tools/deployment/common/use-it.sh\n        - ./tools/deployment/common/force-cronjob-run.sh\n\n- job:\n    name: openstack-helm-compute-kit-rook\n    parent: openstack-helm-deploy\n    nodeset: openstack-helm-5nodes-ubuntu_jammy\n    abstract: true\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - ./tools/deployment/ceph/ceph-rook.sh\n        - ./tools/deployment/ceph/ceph-adapter-rook.sh\n        - 
export VOLUME_HELM_ARGS=\" \"; ./tools/deployment/component/common/rabbitmq.sh\n        - export VOLUME_HELM_ARGS=\" \"; ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/cinder/cinder.sh\n        - ./tools/deployment/component/heat/heat.sh\n        - ./tools/deployment/component/glance/glance.sh\n        - ./tools/deployment/component/compute-kit/openvswitch.sh\n        - ./tools/deployment/component/compute-kit/libvirt.sh\n        - ./tools/deployment/component/compute-kit/compute-kit.sh\n        - ./tools/deployment/monitoring/openstack-exporter.sh\n        - ./tools/deployment/common/use-it.sh\n        - ./tools/deployment/common/force-cronjob-run.sh\n\n- job:\n    name: openstack-helm-compute-kit-helm-repo-local\n    parent: openstack-helm-deploy\n    abstract: true\n    vars:\n      osh_helm_repo: openstack-helm\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/prepare-helm-repos-local.sh\n        - ./tools/deployment/common/setup-client.sh\n        - ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/heat/heat.sh\n        - export GLANCE_BACKEND=local; ./tools/deployment/component/glance/glance.sh\n        - ./tools/deployment/component/compute-kit/openvswitch.sh\n        - ./tools/deployment/component/compute-kit/libvirt.sh\n        - ./tools/deployment/component/compute-kit/compute-kit.sh\n        - ./tools/deployment/monitoring/openstack-exporter.sh\n        - export OSH_TEST_TIMEOUT=1200;./tools/deployment/common/run-helm-tests.sh neutron\n        
- ./tools/deployment/common/run-helm-tests.sh nova\n        - ./tools/deployment/common/run-helm-tests.sh glance\n        - ./tools/deployment/common/run-helm-tests.sh keystone\n        - ./tools/deployment/common/use-it.sh\n        - ./tools/deployment/common/force-cronjob-run.sh\n\n- job:\n    name: openstack-helm-compute-kit-helm-repo-public\n    parent: openstack-helm-deploy\n    abstract: true\n    vars:\n      osh_helm_repo: openstack-helm\n      download_overrides: \"-d\"\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/prepare-helm-repos-public.sh\n        - ./tools/deployment/common/setup-client.sh\n        - ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/heat/heat.sh\n        - export GLANCE_BACKEND=local; ./tools/deployment/component/glance/glance.sh\n        - ./tools/deployment/component/compute-kit/openvswitch.sh\n        - ./tools/deployment/component/compute-kit/libvirt.sh\n        - ./tools/deployment/component/compute-kit/compute-kit.sh\n        - ./tools/deployment/monitoring/openstack-exporter.sh\n        - export OSH_TEST_TIMEOUT=1200;./tools/deployment/common/run-helm-tests.sh neutron\n        - ./tools/deployment/common/run-helm-tests.sh nova\n        - ./tools/deployment/common/run-helm-tests.sh glance\n        - ./tools/deployment/common/run-helm-tests.sh keystone\n        - ./tools/deployment/common/use-it.sh\n        - ./tools/deployment/common/force-cronjob-run.sh\n\n- job:\n    name: openstack-helm-compute-kit-ovn\n    parent: openstack-helm-deploy\n    abstract: true\n    files:\n      - ^ovn/.*$\n      - ^openvswitch/.*$\n      - ^neutron/.*$\n      - 
^zuul\\.d/.*$\n      - ^tools/deployment/component/ovn/.*$\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - export VOLUME_HELM_ARGS=\"--set volume.enabled=false\"; ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/heat/heat.sh\n        - export GLANCE_BACKEND=local; ./tools/deployment/component/glance/glance.sh\n        - ./tools/deployment/component/compute-kit/openvswitch.sh\n        - ./tools/deployment/component/compute-kit/libvirt.sh\n        - ./tools/deployment/component/ovn/ovn.sh\n        - ./tools/deployment/component/compute-kit/compute-kit.sh\n        - ./tools/deployment/monitoring/openstack-exporter.sh\n        - export OSH_TEST_TIMEOUT=1200;./tools/deployment/common/run-helm-tests.sh neutron\n        - ./tools/deployment/common/run-helm-tests.sh nova\n        - ./tools/deployment/common/run-helm-tests.sh glance\n        - ./tools/deployment/common/run-helm-tests.sh keystone\n        - ./tools/deployment/common/use-it.sh\n        - ./tools/deployment/common/force-cronjob-run.sh\n\n- job:\n    name: openstack-helm-keystone-ldap\n    parent: openstack-helm-deploy\n    abstract: true\n    files:\n      - ^keystone/.*$\n      - ^zuul\\.d/.*$\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - ./tools/deployment/common/cert-manager.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - 
./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/component/common/ldap.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n\n- job:\n    name: openstack-helm-cinder\n    parent: openstack-helm-deploy\n    abstract: true\n    files:\n      - ^cinder/.*$\n      - ^zuul\\.d/.*$\n      - ^tools/deployment/component/cinder/.*$\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - ./tools/deployment/ceph/ceph.sh\n        - ./tools/deployment/ceph/ceph-ns-activate.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/cinder/cinder.sh\n        - ./tools/deployment/common/force-cronjob-run.sh\n\n- job:\n    name: openstack-helm-cinder-rook\n    parent: openstack-helm-deploy\n    nodeset: openstack-helm-5nodes-ubuntu_jammy\n    abstract: true\n    files:\n      - ^cinder/.*$\n      - ^zuul\\.d/.*$\n      - ^tools/deployment/component/cinder/.*$\n      - ^tools/deployment/ceph/.*$\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - ./tools/deployment/ceph/ceph-rook.sh\n        - ./tools/deployment/ceph/ceph-adapter-rook.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/cinder/cinder.sh\n        - 
./tools/deployment/common/force-cronjob-run.sh\n\n- job:\n    name: openstack-helm-trove\n    parent: openstack-helm-deploy\n    abstract: true\n    nodeset: openstack-helm-5nodes-ubuntu_noble\n    files:\n      - ^trove/.*$\n      - ^zuul\\.d/.*$\n      - ^tools/deployment/component/trove/.*$\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - ./tools/deployment/ceph/ceph-rook.sh\n        - ./tools/deployment/ceph/ceph-adapter-rook.sh\n        - export VOLUME_HELM_ARGS=\" \"; ./tools/deployment/component/common/rabbitmq.sh\n        - export VOLUME_HELM_ARGS=\" \"; ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/cinder/cinder.sh\n        - ./tools/deployment/component/heat/heat.sh\n        - ./tools/deployment/component/glance/glance.sh\n        - ./tools/deployment/component/compute-kit/openvswitch.sh\n        - ./tools/deployment/component/compute-kit/libvirt.sh\n        - ./tools/deployment/component/compute-kit/compute-kit.sh\n        - ./tools/deployment/monitoring/openstack-exporter.sh\n        - ./tools/deployment/component/trove/trove.sh\n\n- job:\n    name: openstack-helm-horizon\n    parent: openstack-helm-deploy\n    abstract: true\n    files:\n      - ^horizon/.*$\n      - ^zuul\\.d/.*$\n      - ^tools/deployment/component/horizon/.*$\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - 
./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/horizon/horizon.sh\n\n- job:\n    name: openstack-helm-tls\n    parent: openstack-helm-deploy\n    nodeset: openstack-helm-5nodes-ubuntu_jammy\n    abstract: true\n    # NOTE(kozhukalov): The job is quite unstable now. Let's\n    # temporarily disable voting for this job to unblock the gate.\n    voting: false\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - ./tools/deployment/common/cert-manager.sh\n        - ./tools/deployment/ceph/ceph-rook.sh\n        - ./tools/deployment/ceph/ceph-adapter-rook.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/cinder/cinder.sh\n        - ./tools/deployment/component/heat/heat.sh\n        - ./tools/deployment/component/glance/glance.sh\n        - ./tools/deployment/component/compute-kit/openvswitch.sh\n        - ./tools/deployment/component/compute-kit/libvirt.sh\n        - ./tools/deployment/component/compute-kit/compute-kit.sh\n        - ./tools/deployment/monitoring/openstack-exporter.sh\n        - export OSH_TEST_TIMEOUT=1200; ./tools/deployment/common/run-helm-tests.sh neutron\n        - ./tools/deployment/common/run-helm-tests.sh nova\n        - ./tools/deployment/common/run-helm-tests.sh glance\n        - ./tools/deployment/common/run-helm-tests.sh keystone\n        - ./tools/deployment/common/run-helm-tests.sh cinder\n        - ./tools/deployment/common/use-it.sh\n        - ./tools/deployment/common/force-cronjob-run.sh\n\n- job:\n    name: 
openstack-helm-tacker\n    parent: openstack-helm-deploy\n    files:\n      - tacker/.*\n    abstract: true\n    timeout: 7200\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/heat/heat.sh\n        - export GLANCE_BACKEND=local; ./tools/deployment/component/glance/glance.sh\n        - ./tools/deployment/component/compute-kit/openvswitch.sh\n        - ./tools/deployment/component/compute-kit/libvirt.sh\n        - ./tools/deployment/component/compute-kit/compute-kit.sh\n        - ./tools/deployment/component/barbican/barbican.sh\n        - ./tools/deployment/monitoring/openstack-exporter.sh\n        - ./tools/deployment/component/nfs-provisioner/nfs-provisioner.sh\n        - ./tools/deployment/component/tacker/tacker.sh\n        - ./tools/deployment/common/run-helm-tests.sh tacker\n\n- job:\n    name: openstack-helm-skyline\n    parent: openstack-helm-deploy\n    timeout: 10800\n    files:\n      - skyline/.*\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - export VOLUME_HELM_ARGS=\"--set volume.enabled=false\"; ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/heat/heat.sh\n        - export 
GLANCE_BACKEND=local; ./tools/deployment/component/glance/glance.sh\n        - ./tools/deployment/component/compute-kit/openvswitch.sh\n        - ./tools/deployment/component/compute-kit/libvirt.sh\n        - ./tools/deployment/component/compute-kit/compute-kit.sh\n        - ./tools/deployment/monitoring/openstack-exporter.sh\n        - ./tools/deployment/component/skyline/skyline.sh\n        - ./tools/gate/selenium/skyline-selenium.sh\n\n- job:\n    name: openstack-helm-octavia\n    parent: openstack-helm-deploy\n    timeout: 10800\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - export VOLUME_HELM_ARGS=\"--set volume.enabled=false\"; ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/heat/heat.sh\n        - export GLANCE_BACKEND=local; ./tools/deployment/component/glance/glance.sh\n        - ./tools/deployment/component/compute-kit/openvswitch.sh\n        - ./tools/deployment/component/compute-kit/libvirt.sh\n        - ./tools/deployment/component/compute-kit/compute-kit.sh\n        - ./tools/deployment/monitoring/openstack-exporter.sh\n        - ./tools/deployment/component/barbican/barbican.sh\n        - ./tools/deployment/component/octavia/octavia_resources.sh\n        - ./tools/deployment/component/octavia/octavia_certs.sh\n        - ./tools/deployment/component/octavia/octavia.sh\n        - ./tools/deployment/component/octavia/octavia_test.sh\n\n- job:\n    name: openstack-helm-watcher\n    parent: openstack-helm-deploy\n    timeout: 10800\n    files:\n      - watcher/.*\n    abstract: true\n    vars:\n      gate_scripts:\n        - 
./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - export VOLUME_HELM_ARGS=\"--set volume.enabled=false\"; ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/heat/heat.sh\n        - export GLANCE_BACKEND=local; ./tools/deployment/component/glance/glance.sh\n        - ./tools/deployment/component/compute-kit/openvswitch.sh\n        - ./tools/deployment/component/compute-kit/libvirt.sh\n        - ./tools/deployment/component/compute-kit/compute-kit.sh\n        - ./tools/deployment/monitoring/openstack-exporter.sh\n        - ./tools/deployment/component/watcher/watcher.sh\n\n- job:\n    name: openstack-helm-blazar\n    parent: openstack-helm-deploy\n    timeout: 7200\n    files:\n      - blazar/.*\n    abstract: true\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - export GLANCE_BACKEND=local; ./tools/deployment/component/glance/glance.sh\n        - ./tools/deployment/component/compute-kit/openvswitch.sh\n        - ./tools/deployment/component/compute-kit/libvirt.sh\n        - ./tools/deployment/component/compute-kit/compute-kit.sh\n        - ./tools/deployment/monitoring/openstack-exporter.sh\n        - ./tools/deployment/component/blazar/blazar.sh\n        - 
./tools/deployment/component/blazar/blazar_smoke_test.sh\n        - ./tools/deployment/common/run-helm-tests.sh blazar\n\n- job:\n    name: openstack-helm-cloudkitty\n    parent: openstack-helm-deploy\n    timeout: 7200\n    files:\n      - cloudkitty/.*\n    abstract: true\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - export VOLUME_HELM_ARGS=\"--set volume.enabled=false\"; ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/heat/heat.sh\n        - export GLANCE_BACKEND=local; ./tools/deployment/component/glance/glance.sh\n        - ./tools/deployment/component/compute-kit/openvswitch.sh\n        - ./tools/deployment/component/compute-kit/libvirt.sh\n        - ./tools/deployment/component/compute-kit/compute-kit.sh\n        - ./tools/deployment/component/cloudkitty/cloudkitty.sh\n        - ./tools/deployment/monitoring/openstack-exporter.sh\n\n- job:\n    name: openstack-helm-freezer\n    parent: openstack-helm-deploy\n    timeout: 10800\n    files:\n      - freezer/.*\n    abstract: true\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - export VOLUME_HELM_ARGS=\"--set volume.enabled=false\"; ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - 
./tools/deployment/component/freezer/freezer.sh\n        - ./tools/deployment/component/freezer/freezer_smoke_test.sh\n\n- job:\n    name: openstack-helm-zaqar\n    parent: openstack-helm-deploy\n    timeout: 10800\n    files:\n      - zaqar/.*\n    abstract: true\n    vars:\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - export VOLUME_HELM_ARGS=\"--set volume.enabled=false\"; ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/redis/redis.sh\n        - ./tools/deployment/component/zaqar/zaqar.sh\n        - ./tools/deployment/component/zaqar/zaqar_smoke_test.sh\n\n- job:\n    name: openstack-helm-swift\n    parent: openstack-helm-deploy\n    timeout: 10800\n    files:\n      - swift/.*\n    abstract: true\n    vars:\n      loopback_format: true\n      loopback_format_fs_type: ext4\n      loopback_mount: true\n      loopback_mount_path: /srv/node/loop100\n      loopback_image: \"/opt/ext_vol/openstack-helm/swift-loop.img\"\n      gate_scripts:\n        - ./tools/deployment/common/prepare-bashrc.sh\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - ./tools/deployment/component/nfs-provisioner/nfs-provisioner.sh\n        - export VOLUME_HELM_ARGS=\"--set volume.enabled=false\"; ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/db/mariadb.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/component/keystone/keystone.sh\n        - ./tools/deployment/component/swift/swift.sh\n...\n"
  },
  {
    "path": "zuul.d/infra_jobs.yaml",
    "content": "---\n# Copyright 2018 SUSE LINUX GmbH.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n- job:\n    name: openstack-helm-logging\n    parent: openstack-helm-deploy\n    nodeset: openstack-helm-5nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n      gate_scripts:\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/ceph/ceph-rook.sh\n        - ./tools/deployment/ceph/ceph-adapter-rook.sh\n        - export NAMESPACE=osh-infra; ./tools/deployment/component/common/ldap.sh\n        - ./tools/deployment/logging/elasticsearch.sh\n        - ./tools/deployment/logging/fluentd.sh\n        - ./tools/deployment/logging/kibana.sh\n        - ./tools/gate/selenium/kibana-selenium.sh || true\n\n- job:\n    name: openstack-helm-monitoring\n    parent: openstack-helm-deploy\n    nodeset: openstack-helm-1node-ubuntu_jammy\n    timeout: 10800\n    vars:\n      ingress_osh_infra_setup: true\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n      gate_scripts:\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - 
./tools/deployment/common/deploy-docker-registry.sh\n        - export NAMESPACE=osh-infra; ./tools/deployment/component/common/ldap.sh\n        - export NAMESPACE=osh-infra; ./tools/deployment/db/mariadb.sh\n        - export NAMESPACE=osh-infra; ./tools/deployment/db/postgresql.sh\n        - ./tools/deployment/monitoring/prometheus.sh\n        - ./tools/deployment/monitoring/alertmanager.sh\n        - ./tools/deployment/monitoring/kube-state-metrics.sh\n        - ./tools/deployment/monitoring/node-problem-detector.sh\n        - ./tools/deployment/monitoring/node-exporter.sh\n        - ./tools/deployment/monitoring/process-exporter.sh\n        - ./tools/deployment/monitoring/blackbox-exporter.sh\n        - ./tools/deployment/monitoring/grafana.sh\n        - ./tools/deployment/monitoring/nagios.sh\n        - ./tools/gate/selenium/grafana-selenium.sh || true\n        - ./tools/gate/selenium/prometheus-selenium.sh || true\n        - ./tools/gate/selenium/nagios-selenium.sh || true\n\n- job:\n    name: openstack-helm-mariadb-operator-2025-1-ubuntu_jammy\n    parent: openstack-helm-deploy\n    nodeset: openstack-helm-5nodes-ubuntu_jammy\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: \"ldap,prometheus,backups,gateway\"\n      gate_scripts:\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        - ./tools/deployment/common/setup-client.sh\n        - ./tools/deployment/common/namespace-config.sh\n        - ./tools/deployment/ceph/ceph-rook.sh\n        - ./tools/deployment/ceph/ceph-adapter-rook.sh\n        - ./tools/deployment/component/common/rabbitmq.sh\n        - ./tools/deployment/component/common/memcached.sh\n        - ./tools/deployment/db/mariadb-operator-cluster.sh\n        - ./tools/deployment/component/common/ldap.sh\n        - |\n          export OSH_EXTRA_HELM_ARGS=\"--set 
endpoints.oslo_db.hosts.default=mariadb-server-primary ${OSH_EXTRA_HELM_ARGS}\"\n          ./tools/deployment/openstack/keystone.sh\n        - ./tools/deployment/db/mariadb-backup.sh\n        - ./tools/deployment/monitoring/mysql-exporter.sh\n    files:\n      - ^roles/.*\n      - ^mariadb-cluster/.*\n      - ^tools/.*\n\n- job:\n    name: openstack-helm-mariadb-ingress-2025-1-ubuntu_jammy\n    parent: openstack-helm-compute-kit-2025-1-ubuntu_jammy\n    vars:\n      osh_params:\n        feature_gates: \"ingress-service,gateway\"\n    files:\n      - ^helm-toolkit/.*\n      - ^roles/.*\n      - ^rabbitmq/.*\n      - ^mariadb/.*\n      - ^memcached/.*\n      - ^libvirt/.*\n      - ^openvswitch/.*\n\n- job:\n    name: openstack-helm-ceph-migrate\n    description: |\n      This job is for testing the migration procedure from\n      a Ceph cluster managed by legacy OSH ceph* charts\n      to a Ceph cluster managed by Rook-Ceph operator.\n    parent: openstack-helm-deploy\n    nodeset: openstack-helm-5nodes-ubuntu_jammy\n    timeout: 10800\n    pre-run:\n      - playbooks/prepare-hosts.yaml\n      - playbooks/mount-volumes.yaml\n      - playbooks/inject-keys.yaml\n    files:\n      - ^helm-toolkit/.*\n      - ^roles/.*\n      - ^ceph.*\n      - ^tools/deployment/ceph/.*\n    vars:\n      osh_params:\n        openstack_release: \"2025.1\"\n        container_distro_name: ubuntu\n        container_distro_version: jammy\n        feature_gates: gateway\n      gate_scripts:\n        - ./tools/deployment/common/prepare-k8s.sh\n        - ./tools/deployment/common/prepare-charts.sh\n        # Deploy Ceph cluster using legacy OSH charts\n        - ./tools/deployment/ceph/ceph_legacy.sh\n        # Deploy stateful applications\n        - |\n          export NAMESPACE=openstack\n          export MONITORING_HELM_ARGS=\" \"\n          export RUN_HELM_TESTS=no\n          export VOLUME_HELM_ARGS=\" \"\n          ./tools/deployment/db/mariadb.sh\n        - |\n          export 
NAMESPACE=openstack\n          export VOLUME_HELM_ARGS=\" \"\n          ./tools/deployment/component/common/rabbitmq.sh\n        # Migrate legacy Ceph to Rook\n        - ./tools/deployment/ceph/migrate-before.sh\n        - ./tools/deployment/ceph/migrate-values.sh\n        - ./tools/deployment/ceph/migrate-to-rook-ceph.sh\n        - ./tools/deployment/ceph/migrate-after.sh\n...\n"
  },
  {
    "path": "zuul.d/nodesets.yaml",
    "content": "---\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n- nodeset:\n    name: openstack-helm-1node-ubuntu_jammy\n    nodes:\n      - name: primary\n        label: ubuntu-jammy\n    groups:\n      - name: primary\n        nodes:\n          - primary\n      - name: k8s_cluster\n        nodes:\n          - primary\n      - name: k8s_control_plane\n        nodes:\n          - primary\n\n- nodeset:\n    name: openstack-helm-3nodes-ubuntu_jammy\n    nodes:\n      - name: primary\n        label: ubuntu-jammy\n      - name: node-1\n        label: ubuntu-jammy\n      - name: node-2\n        label: ubuntu-jammy\n    groups:\n      - name: primary\n        nodes:\n          - primary\n      - name: nodes\n        nodes:\n          - node-1\n          - node-2\n      - name: k8s_cluster\n        nodes:\n          - primary\n          - node-1\n          - node-2\n      - name: k8s_control_plane\n        nodes:\n          - primary\n      - name: k8s_nodes\n        nodes:\n          - node-1\n          - node-2\n\n- nodeset:\n    name: openstack-helm-1node-2nodes-ubuntu_jammy\n    nodes:\n      - name: primary\n        label: ubuntu-jammy\n      - name: node-1\n        label: ubuntu-jammy\n      - name: node-2\n        label: ubuntu-jammy\n    groups:\n      - name: primary\n        nodes:\n          - primary\n      - name: k8s_cluster\n        nodes:\n          - node-1\n          - node-2\n      - name: k8s_control_plane\n        nodes:\n          - node-1\n  
    - name: k8s_nodes\n        nodes:\n          - node-2\n\n- nodeset:\n    name: openstack-helm-1node-3nodes-ubuntu_jammy\n    nodes:\n      - name: primary\n        label: ubuntu-jammy\n      - name: node-1\n        label: ubuntu-jammy\n      - name: node-2\n        label: ubuntu-jammy\n      - name: node-3\n        label: ubuntu-jammy\n    groups:\n      - name: primary\n        nodes:\n          - primary\n      - name: k8s_cluster\n        nodes:\n          - node-1\n          - node-2\n          - node-3\n      - name: k8s_control_plane\n        nodes:\n          - node-1\n      - name: k8s_nodes\n        nodes:\n          - node-2\n          - node-3\n\n- nodeset:\n    name: openstack-helm-1node-32GB-ubuntu_jammy\n    nodes:\n      - name: primary\n        # This label is available in vexxhost ca-ymq-1 region\n        # The flavor v3-standard-8 in this particular region has\n        # 32GB nodes. The number of such nodes is extremely limited.\n        label: ubuntu-jammy-32GB\n    groups:\n      - name: primary\n        nodes:\n          - primary\n      - name: k8s_cluster\n        nodes:\n          - primary\n      - name: k8s_control_plane\n        nodes:\n          - primary\n\n- nodeset:\n    name: openstack-helm-4nodes-ubuntu_jammy\n    nodes:\n      - name: primary\n        label: ubuntu-jammy\n      - name: node-1\n        label: ubuntu-jammy\n      - name: node-2\n        label: ubuntu-jammy\n      - name: node-3\n        label: ubuntu-jammy\n    groups:\n      - name: primary\n        nodes:\n          - primary\n      - name: nodes\n        nodes:\n          - node-1\n          - node-2\n          - node-3\n      - name: k8s_cluster\n        nodes:\n          - primary\n          - node-1\n          - node-2\n          - node-3\n      - name: k8s_control_plane\n        nodes:\n          - primary\n      - name: k8s_nodes\n        nodes:\n          - node-1\n          - node-2\n          - node-3\n\n- nodeset:\n    name: 
openstack-helm-5nodes-ubuntu_jammy\n    nodes:\n      - name: primary\n        label: ubuntu-jammy\n      - name: node-1\n        label: ubuntu-jammy\n      - name: node-2\n        label: ubuntu-jammy\n      - name: node-3\n        label: ubuntu-jammy\n      - name: node-4\n        label: ubuntu-jammy\n    groups:\n      - name: primary\n        nodes:\n          - primary\n      - name: nodes\n        nodes:\n          - node-1\n          - node-2\n          - node-3\n          - node-4\n      - name: k8s_cluster\n        nodes:\n          - primary\n          - node-1\n          - node-2\n          - node-3\n          - node-4\n      - name: k8s_control_plane\n        nodes:\n          - primary\n      - name: k8s_nodes\n        nodes:\n          - node-1\n          - node-2\n          - node-3\n          - node-4\n\n- nodeset:\n    name: openstack-helm-1node-ubuntu_noble\n    nodes:\n      - name: primary\n        label: ubuntu-noble\n    groups:\n      - name: primary\n        nodes:\n          - primary\n      - name: k8s_cluster\n        nodes:\n          - primary\n      - name: k8s_control_plane\n        nodes:\n          - primary\n\n- nodeset:\n    name: openstack-helm-3nodes-ubuntu_noble\n    nodes:\n      - name: primary\n        label: ubuntu-noble\n      - name: node-1\n        label: ubuntu-noble\n      - name: node-2\n        label: ubuntu-noble\n    groups:\n      - name: primary\n        nodes:\n          - primary\n      - name: nodes\n        nodes:\n          - node-1\n          - node-2\n      - name: k8s_cluster\n        nodes:\n          - primary\n          - node-1\n          - node-2\n      - name: k8s_control_plane\n        nodes:\n          - primary\n      - name: k8s_nodes\n        nodes:\n          - node-1\n          - node-2\n\n- nodeset:\n    name: openstack-helm-1node-2nodes-ubuntu_noble\n    nodes:\n      - name: primary\n        label: ubuntu-noble\n      - name: node-1\n        label: ubuntu-noble\n      - name: node-2\n       
 label: ubuntu-noble\n    groups:\n      - name: primary\n        nodes:\n          - primary\n      - name: k8s_cluster\n        nodes:\n          - node-1\n          - node-2\n      - name: k8s_control_plane\n        nodes:\n          - node-1\n      - name: k8s_nodes\n        nodes:\n          - node-2\n\n- nodeset:\n    name: openstack-helm-1node-3nodes-ubuntu_noble\n    nodes:\n      - name: primary\n        label: ubuntu-noble\n      - name: node-1\n        label: ubuntu-noble\n      - name: node-2\n        label: ubuntu-noble\n      - name: node-3\n        label: ubuntu-noble\n    groups:\n      - name: primary\n        nodes:\n          - primary\n      - name: k8s_cluster\n        nodes:\n          - node-1\n          - node-2\n          - node-3\n      - name: k8s_control_plane\n        nodes:\n          - node-1\n      - name: k8s_nodes\n        nodes:\n          - node-2\n          - node-3\n\n- nodeset:\n    name: openstack-helm-1node-32GB-ubuntu_noble\n    nodes:\n      - name: primary\n        # This label is available in vexxhost ca-ymq-1 region\n        # The flavor v3-standard-8 in this particular region has\n        # 32GB nodes. 
The number of such nodes is extremely limited.\n        label: ubuntu-noble-32GB\n    groups:\n      - name: primary\n        nodes:\n          - primary\n      - name: k8s_cluster\n        nodes:\n          - primary\n      - name: k8s_control_plane\n        nodes:\n          - primary\n\n- nodeset:\n    name: openstack-helm-4nodes-ubuntu_noble\n    nodes:\n      - name: primary\n        label: ubuntu-noble\n      - name: node-1\n        label: ubuntu-noble\n      - name: node-2\n        label: ubuntu-noble\n      - name: node-3\n        label: ubuntu-noble\n    groups:\n      - name: primary\n        nodes:\n          - primary\n      - name: nodes\n        nodes:\n          - node-1\n          - node-2\n          - node-3\n      - name: k8s_cluster\n        nodes:\n          - primary\n          - node-1\n          - node-2\n          - node-3\n      - name: k8s_control_plane\n        nodes:\n          - primary\n      - name: k8s_nodes\n        nodes:\n          - node-1\n          - node-2\n          - node-3\n\n- nodeset:\n    name: openstack-helm-5nodes-ubuntu_noble\n    nodes:\n      - name: primary\n        label: ubuntu-noble\n      - name: node-1\n        label: ubuntu-noble\n      - name: node-2\n        label: ubuntu-noble\n      - name: node-3\n        label: ubuntu-noble\n      - name: node-4\n        label: ubuntu-noble\n    groups:\n      - name: primary\n        nodes:\n          - primary\n      - name: nodes\n        nodes:\n          - node-1\n          - node-2\n          - node-3\n          - node-4\n      - name: k8s_cluster\n        nodes:\n          - primary\n          - node-1\n          - node-2\n          - node-3\n          - node-4\n      - name: k8s_control_plane\n        nodes:\n          - primary\n      - name: k8s_nodes\n        nodes:\n          - node-1\n          - node-2\n          - node-3\n          - node-4\n...\n"
  },
  {
    "path": "zuul.d/project.yaml",
    "content": "---\n# Copyright 2018, SUSE LINUX GmbH.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n- project:\n    vars:\n      # shared across all jobs\n      helm_version: \"4.1.1\"\n      chart_testing_version: \"3.11.0\"\n      virtualenv: \"{{ ansible_user_dir }}/venv\"\n      base_version: \"2025.2.0\"\n\n    templates:\n      - publish-openstack-docs-pti\n      - release-notes-jobs-python3\n    check:\n      jobs:\n        - openstack-helm-linter\n        - openstack-helm-pre-commit\n        - openstack-helm-bandit\n        # 2024.2\n        - openstack-helm-cinder-2024-2-ubuntu_jammy  # 3 nodes rook\n        - openstack-helm-compute-kit-2024-2-ubuntu_jammy  # 1 node + 3 nodes\n        # 2025.1 Ubuntu Jammy\n        - openstack-helm-cinder-2025-1-ubuntu_jammy  # 3 nodes rook\n        - openstack-helm-compute-kit-2025-1-ubuntu_jammy  # 1 node + 3 nodes\n        - openstack-helm-octavia-2025-1-ubuntu_jammy  # 4 nodes\n        - openstack-helm-blazar-2025-1-ubuntu_jammy  # 3 nodes; run only if blazar changed\n        - openstack-helm-cloudkitty-2025-1-ubuntu_jammy  # 3 nodes; run only if cloudkitty changed\n        - openstack-helm-freezer-2025-1-ubuntu_jammy  # 3 nodes; run only if freezer changed\n        - openstack-helm-zaqar-2025-1-ubuntu_jammy  # 3 nodes; run only if zaqar changed\n        # TODO: Configure Cilium not to setup it's own overlay and\n        # use existing VXLAN overlay interface for internal K8s communication\n        # - 
openstack-helm-compute-kit-cilium-2025-1-ubuntu_jammy  # 1 node + 3 nodes\n        - openstack-helm-horizon-2025-1-ubuntu_jammy  # 1 node\n        - openstack-helm-compute-kit-dpdk-2025-1-ubuntu_jammy  # 32GB node\n        # 2025.1 Ubuntu Noble\n        - openstack-helm-cinder-2025-1-ubuntu_noble  # 5 nodes rook\n        - openstack-helm-compute-kit-2025-1-ubuntu_noble  # 1 node + 3 nodes\n        # 2025.2 Ubuntu Noble\n        - openstack-helm-cinder-2025-2-ubuntu_noble  # 5 nodes rook\n        - openstack-helm-compute-kit-2025-2-ubuntu_noble  # 1 node + 3 nodes\n        - openstack-helm-compute-kit-ovn-2025-2-ubuntu_noble  # 1 node + 3 nodes\n        - openstack-helm-skyline-2025-2-ubuntu_noble  # 3 nodes\n        - openstack-helm-trove-2025-2-ubuntu_noble  # 5 nodes rook\n        - openstack-helm-swift-2025-2-ubuntu_noble  # 3 nodes\n        # Infra jobs\n        - openstack-helm-logging\n        - openstack-helm-monitoring\n    gate:\n      jobs:\n        - openstack-helm-linter\n        - openstack-helm-cinder-2025-1-ubuntu_jammy\n        - openstack-helm-compute-kit-2025-1-ubuntu_jammy\n        # - openstack-helm-logging\n        # - openstack-helm-monitoring\n    post:\n      jobs:\n        - openstack-helm-publish-charts\n    periodic:\n      jobs:\n        - openstack-helm-compute-kit-helm-repo-public-2025-1-ubuntu_jammy  # 1 node + 3 nodes\n        - openstack-helm-watcher-2025-1-ubuntu_jammy  # 3 nodes\n        - openstack-helm-blazar-2025-1-ubuntu_jammy  # 3 nodes\n        - openstack-helm-cloudkitty-2025-1-ubuntu_jammy  # 3 nodes\n        - openstack-helm-freezer-2025-1-ubuntu_jammy  # 3 nodes\n        - openstack-helm-tacker-2025-1-ubuntu_jammy  # 3 nodes\n...\n"
  }
]